author     ats <ats@FreeBSD.org>    1995-01-19 22:05:27 +0000
committer  ats <ats@FreeBSD.org>    1995-01-19 22:05:27 +0000
commit     8f8e6b75d80a8a4004fc19ba70a910ac0a622700 (patch)
tree       40da23a5166a85babafb0d9ba25b5f57969e3b57 /sys/amd64
parent     13f4ae2eb99cd673ca5b7321bcd998998a7ca4e6 (diff)
Submitted by: Bruce Evans
Put in the much shorter and cleaner version of calibrate_cyclecounter() for the Pentium that Bruce suggested. Tested here on my Pentium and it works okay.
Diffstat (limited to 'sys/amd64')
 -rw-r--r--  sys/amd64/amd64/tsc.c   | 39
 -rw-r--r--  sys/amd64/isa/clock.c   | 39
 2 files changed, 18 insertions, 60 deletions
diff --git a/sys/amd64/amd64/tsc.c b/sys/amd64/amd64/tsc.c
index 95bef69..d3ca0f0 100644
--- a/sys/amd64/amd64/tsc.c
+++ b/sys/amd64/amd64/tsc.c
@@ -282,40 +282,19 @@ static long long cycles_per_sec = 0;
void
calibrate_cyclecounter(void)
{
- volatile long edx, eax, lasteax, lastedx;
+ /*
+ * Don't need volatile; should always use unsigned if 2's
+ * complement arithmetic is desired.
+ */
+ unsigned long long count, last_count;
- __asm __volatile(".byte 0x0f, 0x31" : "=a"(lasteax), "=d"(lastedx) : );
+ __asm __volatile(".byte 0xf,0x31" : "=A" (last_count));
DELAY(1000000);
- __asm __volatile(".byte 0x0f, 0x31" : "=a"(eax), "=d"(edx) : );
-
+ __asm __volatile(".byte 0xf,0x31" : "=A" (count));
/*
- * This assumes that you will never have a clock rate higher
- * than 4GHz, probably a good assumption.
+ * XXX lose if the clock rate is not nearly a multiple of 1000000.
*/
- /* The following C code is correct, but our current gcc 2.6.3
- * seems to produce bad assembly code for it , ATS , XXXX */
-#if 0
- cycles_per_sec = ((long long)edx << 32) + eax;
- cycles_per_sec -= ((long long)lastedx << 32) + lasteax;
- pentium_mhz = ((long)cycles_per_sec + 500000) / 1000000; /* round up */
-#else
- /* produce a workaround for the code above */
- {
- union {
- long long extralong;
- long shorty[2];
- } tmp;
-
- tmp.shorty[0] = eax;
- tmp.shorty[1] = edx;
- cycles_per_sec = tmp.extralong;
- tmp.shorty[0] = lasteax;
- tmp.shorty[1] = lastedx;
- cycles_per_sec -= tmp.extralong;
- /* round up */
- pentium_mhz = (long) ((cycles_per_sec + 500000) / 1000000);
- }
-#endif
+ pentium_mhz = ((count - last_count) + 500000) / 1000000;
}
#endif
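
An aside on the hunk above: the deleted #if 0 block and its union workaround were both just ways of gluing the EDX:EAX halves returned by RDTSC into a single 64-bit value, and the shift form was what gcc 2.6.3 reportedly miscompiled. The "=A" constraint in the new code asks the compiler to do that pairing itself. Below is a small standalone illustration of the three forms; it uses the rdtsc mnemonic and my own variable names rather than the commit's ".byte" encoding, and the "=A" variant only applies on 32-bit x86.

#include <stdio.h>

int
main(void)
{
        unsigned int eax, edx;
        unsigned long long count;

        /* Read the counter into two 32-bit halves, as the old code did. */
        __asm__ __volatile__("rdtsc" : "=a" (eax), "=d" (edx));

        /* (1) Shift form, equivalent to the deleted #if 0 branch. */
        count = ((unsigned long long)edx << 32) + eax;
        printf("shift form: %llu\n", count);

        /* (2) Union workaround, equivalent to the deleted #else branch. */
        {
                union {
                        unsigned long long whole;
                        unsigned int half[2];
                } tmp;

                tmp.half[0] = eax;      /* low half first on little-endian x86 */
                tmp.half[1] = edx;
                printf("union form: %llu\n", tmp.whole);
        }

#if defined(__i386__)
        /* (3) "=A" asks gcc itself to pair EDX:EAX (32-bit x86 only). */
        __asm__ __volatile__("rdtsc" : "=A" (count));
        printf("\"=A\" form: %llu\n", count);
#endif
        return (0);
}
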
diff --git a/sys/amd64/isa/clock.c b/sys/amd64/isa/clock.c
index 95bef69..d3ca0f0 100644
--- a/sys/amd64/isa/clock.c
+++ b/sys/amd64/isa/clock.c
@@ -282,40 +282,19 @@ static long long cycles_per_sec = 0;
void
calibrate_cyclecounter(void)
{
- volatile long edx, eax, lasteax, lastedx;
+ /*
+ * Don't need volatile; should always use unsigned if 2's
+ * complement arithmetic is desired.
+ */
+ unsigned long long count, last_count;
- __asm __volatile(".byte 0x0f, 0x31" : "=a"(lasteax), "=d"(lastedx) : );
+ __asm __volatile(".byte 0xf,0x31" : "=A" (last_count));
DELAY(1000000);
- __asm __volatile(".byte 0x0f, 0x31" : "=a"(eax), "=d"(edx) : );
-
+ __asm __volatile(".byte 0xf,0x31" : "=A" (count));
/*
- * This assumes that you will never have a clock rate higher
- * than 4GHz, probably a good assumption.
+ * XXX lose if the clock rate is not nearly a multiple of 1000000.
*/
- /* The following C code is correct, but our current gcc 2.6.3
- * seems to produce bad assembly code for it , ATS , XXXX */
-#if 0
- cycles_per_sec = ((long long)edx << 32) + eax;
- cycles_per_sec -= ((long long)lastedx << 32) + lasteax;
- pentium_mhz = ((long)cycles_per_sec + 500000) / 1000000; /* round up */
-#else
- /* produce a workaround for the code above */
- {
- union {
- long long extralong;
- long shorty[2];
- } tmp;
-
- tmp.shorty[0] = eax;
- tmp.shorty[1] = edx;
- cycles_per_sec = tmp.extralong;
- tmp.shorty[0] = lasteax;
- tmp.shorty[1] = lastedx;
- cycles_per_sec -= tmp.extralong;
- /* round up */
- pentium_mhz = (long) ((cycles_per_sec + 500000) / 1000000);
- }
-#endif
+ pentium_mhz = ((count - last_count) + 500000) / 1000000;
}
#endif
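
For completeness, here is a hedged userland sketch of the whole calibration pass that the new calibrate_cyclecounter() performs: sample the time-stamp counter, wait about one second, sample it again, and round the difference to whole MHz. sleep(1) stands in for the kernel's DELAY(1000000), and the rdtsc64() helper is an illustrative wrapper of my own, not part of the commit.

#include <stdio.h>
#include <unistd.h>

/* Read the 64-bit time-stamp counter; works on both i386 and amd64. */
static unsigned long long
rdtsc64(void)
{
        unsigned int lo, hi;

        __asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
        return ((unsigned long long)hi << 32 | lo);
}

int
main(void)
{
        unsigned long long last_count, count;

        last_count = rdtsc64();
        sleep(1);               /* stands in for DELAY(1000000) in the kernel */
        count = rdtsc64();

        /* Same arithmetic as the committed code: round to the nearest MHz. */
        printf("~%llu MHz\n", (count - last_count + 500000) / 1000000);
        return (0);
}

On current CPUs the TSC ticks at a constant rate independent of the momentary core clock, so a sketch like this measures the TSC frequency rather than the instantaneous clock speed; on the 1995 Pentium the two were the same thing.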