author	kib <kib@FreeBSD.org>	2013-02-01 16:48:55 +0000
committer	kib <kib@FreeBSD.org>	2013-02-01 16:48:55 +0000
commit	5c708a87b105b5ed98582031452348132c5a02e8 (patch)
tree	f05e1ef6e8398d50521127ee28d9bdc09fcd623e
parent	b55183a894ce23691df0c9c6b6fec645a777d401 (diff)
The change that reduced the default smp_tsc_shift caused the TSC shift
to become zero on slower machines, which left the fenced get_timecount()
methods unused even when they were needed. Remove the (shift > 0)
condition when selecting the get_timecount() implementation.
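For background, a minimal userland sketch of what a fenced TSC read does
(the rdtsc_lfence()/rdtsc_mfence() helper names here are hypothetical;
the kernel's actual methods are the tsc_get_timecount_*fence() routines
in sys/x86/x86/tsc.c):

#include <stdint.h>

/*
 * RDTSC is not a serializing instruction, so the CPU may execute it
 * ahead of earlier instructions.  Issuing a fence first keeps the
 * counter reads ordered; Intel documents LFENCE for this purpose and
 * AMD documents MFENCE, matching the vendor split in the diff below.
 */
static inline uint64_t
rdtsc_lfence(void)
{
	uint32_t lo, hi;

	__asm __volatile("lfence; rdtsc" : "=a" (lo), "=d" (hi) : : "memory");
	return ((uint64_t)hi << 32 | lo);
}

static inline uint64_t
rdtsc_mfence(void)
{
	uint32_t lo, hi;

	__asm __volatile("mfence; rdtsc" : "=a" (lo), "=d" (hi) : : "memory");
	return ((uint64_t)hi << 32 | lo);
}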
Rename smp_tsc_shift to tsc_shift, and apply it in the UP case too.
Allow the shift to reach a value of 31 instead of the previous 30
(this should be a nop).
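To see how the loop in the diff picks the shift, here is a standalone
sketch with made-up numbers (the tsc_freq and max_freq values are
hypothetical, not from the commit):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * Hypothetical: a 2.8 GHz TSC and a 1 GHz cap, pre-shifted by
	 * tsc_shift = 1 the way init_TSC_tc() applies it.
	 */
	uint64_t tsc_freq = 2800000000ULL;
	uint64_t max_freq = 1000000000ULL >> 1;
	int shift;

	/*
	 * Same loop shape as the diff: the smallest shift that brings
	 * the TSC frequency under the cap, bounded at 31.
	 */
	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq;
	    shift++)
		;
	printf("shift = %d\n", shift);	/* 3: 2.8 GHz >> 3 = 350 MHz */
	return (0);
}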
Reorganize the tc quality calculation to remove the conditionally
compiled block. Rename test_smp_tsc() to test_tsc() and provide
separate versions for SMP and UP builds. The check for virtualized
hardware is more natural to perform in the SMP version of test_tsc(),
since it is only done in the SMP case.
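The UP stub in the diff is an instance of a common pattern; a reduced,
hypothetical illustration (not the kernel source):

#include <stdio.h>

#ifdef SMP
/* SMP build: stand-in for the real cross-CPU TSC comparison. */
static int
test_tsc(void)
{
	return (800);	/* placeholder quality value */
}
#else
/*
 * UP build: never called at runtime (the caller checks mp_ncpus > 1
 * first), but the symbol must still exist for the kernel to link.
 */
static int
test_tsc(void)
{

	return (0);
}
#endif

int
main(void)
{
	printf("tc_quality = %d\n", test_tsc());
	return (0);
}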
Noted and reviewed by: bde (previous version)
MFC after: 12 days
-rw-r--r--	sys/x86/x86/tsc.c	78
1 file changed, 43 insertions(+), 35 deletions(-)
diff --git a/sys/x86/x86/tsc.c b/sys/x86/x86/tsc.c
index 1714572..4f67818 100644
--- a/sys/x86/x86/tsc.c
+++ b/sys/x86/x86/tsc.c
@@ -65,14 +65,13 @@ static int smp_tsc;
 SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc, CTLFLAG_RDTUN, &smp_tsc, 0,
     "Indicates whether the TSC is safe to use in SMP mode");
 TUNABLE_INT("kern.timecounter.smp_tsc", &smp_tsc);
-
-static int smp_tsc_shift = 1;
-SYSCTL_INT(_kern_timecounter, OID_AUTO, smp_tsc_shift, CTLFLAG_RDTUN,
-    &smp_tsc_shift, 0,
-    "Shift to pre-apply for the maximum TSC frequency in SMP mode");
-TUNABLE_INT("kern.timecounter.smp_tsc_shift", &smp_tsc_shift);
 #endif
 
+static int tsc_shift = 1;
+SYSCTL_INT(_kern_timecounter, OID_AUTO, tsc_shift, CTLFLAG_RDTUN,
+    &tsc_shift, 0, "Shift to pre-apply for the maximum TSC frequency");
+TUNABLE_INT("kern.timecounter.tsc_shift", &tsc_shift);
+
 static int tsc_disabled;
 SYSCTL_INT(_machdep, OID_AUTO, disable_tsc, CTLFLAG_RDTUN, &tsc_disabled, 0,
     "Disable x86 Time Stamp Counter");
@@ -405,12 +404,12 @@ comp_smp_tsc(void *arg)
 }
 
 static int
-test_smp_tsc(void)
+test_tsc(void)
 {
 	uint64_t *data, *tsc;
 	u_int i, size;
 
-	if (!smp_tsc && !tsc_is_invariant)
+	if ((!smp_tsc && !tsc_is_invariant) || vm_guest)
 		return (-100);
 	size = (mp_maxid + 1) * 3;
 	data = malloc(sizeof(*data) * size * N, M_TEMP, M_WAITOK);
@@ -450,6 +449,19 @@ test_smp_tsc(void)
 
 #undef N
 
+#else
+
+/*
+ * The function is not called, it is provided to avoid linking failure
+ * on uniprocessor kernel.
+ */
+static int
+test_tsc(void)
+{
+
+	return (0);
+}
+
 #endif /* SMP */
 
 static void
@@ -498,41 +510,37 @@ init_TSC_tc(void)
 		goto init;
 	}
 
-#ifdef SMP
 	/*
-	 * We can not use the TSC in SMP mode unless the TSCs on all CPUs are
-	 * synchronized.  If the user is sure that the system has synchronized
-	 * TSCs, set kern.timecounter.smp_tsc tunable to a non-zero value.
-	 * We also limit the frequency even lower to avoid "temporal anomalies"
-	 * as much as possible.  The TSC seems unreliable in virtualized SMP
+	 * We can not use the TSC in SMP mode unless the TSCs on all CPUs
+	 * are synchronized.  If the user is sure that the system has
+	 * synchronized TSCs, set kern.timecounter.smp_tsc tunable to a
+	 * non-zero value.  The TSC seems unreliable in virtualized SMP
 	 * environments, so it is set to a negative quality in those cases.
 	 */
-	if (smp_cpus > 1) {
-		if (vm_guest != 0) {
-			tsc_timecounter.tc_quality = -100;
-		} else {
-			tsc_timecounter.tc_quality = test_smp_tsc();
-			max_freq >>= smp_tsc_shift;
-		}
-	} else
-#endif
-	if (tsc_is_invariant)
+	if (mp_ncpus > 1)
+		tsc_timecounter.tc_quality = test_tsc();
+	else if (tsc_is_invariant)
 		tsc_timecounter.tc_quality = 1000;
+	max_freq >>= tsc_shift;
 
 init:
-	for (shift = 0; shift < 31 && (tsc_freq >> shift) > max_freq; shift++)
+	for (shift = 0; shift <= 31 && (tsc_freq >> shift) > max_freq; shift++)
 		;
+	if ((cpu_feature & CPUID_SSE2) != 0 && mp_ncpus > 1) {
+		if (cpu_vendor_id == CPU_VENDOR_AMD) {
+			tsc_timecounter.tc_get_timecount = shift > 0 ?
+			    tsc_get_timecount_low_mfence :
+			    tsc_get_timecount_mfence;
+		} else {
+			tsc_timecounter.tc_get_timecount = shift > 0 ?
+			    tsc_get_timecount_low_lfence :
+			    tsc_get_timecount_lfence;
+		}
+	} else {
+		tsc_timecounter.tc_get_timecount = shift > 0 ?
+		    tsc_get_timecount_low : tsc_get_timecount;
+	}
 	if (shift > 0) {
-		if (cpu_feature & CPUID_SSE2) {
-			if (cpu_vendor_id == CPU_VENDOR_AMD) {
-				tsc_timecounter.tc_get_timecount =
-				    tsc_get_timecount_low_mfence;
-			} else {
-				tsc_timecounter.tc_get_timecount =
-				    tsc_get_timecount_low_lfence;
-			}
-		} else
-			tsc_timecounter.tc_get_timecount = tsc_get_timecount_low;
 		tsc_timecounter.tc_name = "TSC-low";
 		if (bootverbose)
 			printf("TSC timecounter discards lower %d bit(s)\n",