summaryrefslogtreecommitdiffstats
path: root/sys/x86
diff options
context:
space:
mode:
authorjkim <jkim@FreeBSD.org>2011-06-17 21:41:06 +0000
committerjkim <jkim@FreeBSD.org>2011-06-17 21:41:06 +0000
commit8a9fdbb838682bca3d92ebd804102b651699ab25 (patch)
tree26ac1140ee2ae222240b86fc45dea7d39d7f685b /sys/x86
parent93d2d762ed960f4b01b6c523ab0b070977a5beea (diff)
downloadFreeBSD-src-8a9fdbb838682bca3d92ebd804102b651699ab25.zip
FreeBSD-src-8a9fdbb838682bca3d92ebd804102b651699ab25.tar.gz
Teach the compiler how to shift TSC value efficiently. As noted in r220631,
sometimes the compiler inserts redundant instructions to preserve the unused upper 32 bits even when the value is cast to a 32-bit value. Unfortunately, it seems the problem becomes more serious when it is shifted, especially on amd64.
Diffstat (limited to 'sys/x86')
-rw-r--r--sys/x86/x86/tsc.c7
1 file changed, 5 insertions, 2 deletions
diff --git a/sys/x86/x86/tsc.c b/sys/x86/x86/tsc.c
index 70c176a..9501eee 100644
--- a/sys/x86/x86/tsc.c
+++ b/sys/x86/x86/tsc.c
@@ -461,7 +461,7 @@ init_TSC_tc(void)
tsc_timecounter.tc_quality = 1000;
init:
- for (shift = 0; shift < 32 && (tsc_freq >> shift) > max_freq; shift++)
+ for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
;
if (shift > 0) {
tsc_timecounter.tc_get_timecount = tsc_get_timecount_low;
@@ -579,6 +579,9 @@ tsc_get_timecount(struct timecounter *tc __unused)
static u_int
tsc_get_timecount_low(struct timecounter *tc)
{
+ uint32_t rv;
- return (rdtsc() >> (int)(intptr_t)tc->tc_priv);
+ __asm __volatile("rdtsc; shrd %%cl, %%edx, %0"
+ : "=a" (rv) : "c" ((int)(intptr_t)tc->tc_priv) : "edx");
+ return (rv);
}
OpenPOWER on IntegriCloud