 sys/amd64/amd64/legacy.c                |  3
 sys/amd64/amd64/machdep.c               | 11
 sys/amd64/amd64/prof_machdep.c          | 16
 sys/cddl/dev/dtrace/amd64/dtrace_subr.c |  2
 sys/cddl/dev/dtrace/i386/dtrace_subr.c  |  2
 sys/compat/linprocfs/linprocfs.c        |  8
 sys/contrib/altq/altq/altq_subr.c       |  5
 sys/dev/acpica/acpi_cpu.c               |  2
 sys/i386/i386/legacy.c                  |  3
 sys/i386/i386/machdep.c                 | 11
 sys/i386/i386/perfmon.c                 |  6
 sys/i386/isa/prof_machdep.c             | 28
 sys/pc98/pc98/machdep.c                 |  9
 sys/x86/cpufreq/est.c                   |  2
 sys/x86/isa/clock.c                     | 10
 sys/x86/x86/tsc.c                       | 14
 16 files changed, 78 insertions(+), 54 deletions(-)
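
All of the hunks below apply one pattern: tsc_freq can change at runtime (cpufreq level transitions and the machdep.tsc_freq sysctl), so consumers now take a single snapshot with an acquire load and writers publish with a release store. A minimal sketch of that pattern, assuming FreeBSD's 64-bit atomic(9) primitives from <machine/atomic.h>; the two helper names are illustrative only, not functions added by this change:

#include <sys/types.h>
#include <machine/atomic.h>

extern uint64_t	tsc_freq;		/* calibrated TSC frequency, in Hz */

/* Reader side: snapshot once, reuse the local copy for all decisions. */
static uint64_t
tsc_freq_snapshot(void)
{

	return (atomic_load_acq_64(&tsc_freq));
}

/* Writer side: publish a new frequency so later acquire loads observe it. */
static void
tsc_freq_publish(uint64_t freq)
{

	atomic_store_rel_64(&tsc_freq, freq);
}
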
diff --git a/sys/amd64/amd64/legacy.c b/sys/amd64/amd64/legacy.c
index 0e7bac1..100ce7c 100644
--- a/sys/amd64/amd64/legacy.c
+++ b/sys/amd64/amd64/legacy.c
@@ -321,7 +321,8 @@ cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
break;
case CPU_IVAR_NOMINAL_MHZ:
if (tsc_is_invariant) {
- *result = (uintptr_t)(tsc_freq / 1000000);
+ *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) /
+ 1000000);
break;
}
/* FALLTHROUGH */
diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
index 59a6ca8..07a42e6 100644
--- a/sys/amd64/amd64/machdep.c
+++ b/sys/amd64/amd64/machdep.c
@@ -546,18 +546,19 @@ int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
register_t reg;
- uint64_t tsc1, tsc2;
+ uint64_t freq, tsc1, tsc2;
if (pcpu_find(cpu_id) == NULL || rate == NULL)
return (EINVAL);
+ freq = atomic_load_acq_64(&tsc_freq);
/* If TSC is P-state invariant, DELAY(9) based logic fails. */
- if (tsc_is_invariant && tsc_freq != 0)
+ if (tsc_is_invariant && freq != 0)
return (EOPNOTSUPP);
/* If we're booting, trust the rate calibrated moments ago. */
- if (cold && tsc_freq != 0) {
- *rate = tsc_freq;
+ if (cold && freq != 0) {
+ *rate = freq;
return (0);
}
@@ -586,7 +587,7 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
#endif
tsc2 -= tsc1;
- if (tsc_freq != 0) {
+ if (freq != 0) {
*rate = tsc2 * 1000;
return (0);
}
diff --git a/sys/amd64/amd64/prof_machdep.c b/sys/amd64/amd64/prof_machdep.c
index 6ebc038..273c833 100644
--- a/sys/amd64/amd64/prof_machdep.c
+++ b/sys/amd64/amd64/prof_machdep.c
@@ -311,18 +311,22 @@ void
startguprof(gp)
struct gmonparam *gp;
{
+ uint64_t freq;
+
+ freq = atomic_load_acq_64(&tsc_freq);
if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) {
- cputime_clock = CPUTIME_CLOCK_I8254;
- if (tsc_freq != 0 && mp_ncpus == 1)
+ if (freq != 0 && mp_ncpus == 1)
cputime_clock = CPUTIME_CLOCK_TSC;
+ else
+ cputime_clock = CPUTIME_CLOCK_I8254;
}
- gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
if (cputime_clock == CPUTIME_CLOCK_TSC) {
- gp->profrate = tsc_freq >> 1;
+ gp->profrate = freq >> 1;
cputime_prof_active = 1;
- }
+ } else
+ gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
- else if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
+ if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
if (perfmon_avail() &&
perfmon_setup(0, cputime_clock_pmc_conf) == 0) {
if (perfmon_start(0) != 0)
diff --git a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
index 52da1b7..102ea04 100644
--- a/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/amd64/dtrace_subr.c
@@ -403,7 +403,7 @@ dtrace_gethrtime_init(void *arg)
* Otherwise tick->time conversion will be inaccurate, but
* will preserve monotonic property of TSC.
*/
- tsc_f = tsc_freq;
+ tsc_f = atomic_load_acq_64(&tsc_freq);
/*
* The following line checks that nsec_scale calculated below
diff --git a/sys/cddl/dev/dtrace/i386/dtrace_subr.c b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
index c5ea277..50793c4 100644
--- a/sys/cddl/dev/dtrace/i386/dtrace_subr.c
+++ b/sys/cddl/dev/dtrace/i386/dtrace_subr.c
@@ -403,7 +403,7 @@ dtrace_gethrtime_init(void *arg)
* Otherwise tick->time conversion will be inaccurate, but
* will preserve monotonic property of TSC.
*/
- tsc_f = tsc_freq;
+ tsc_f = atomic_load_acq_64(&tsc_freq);
/*
* The following line checks that nsec_scale calculated below
diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c
index b3d4e9c..ddbd8b4 100644
--- a/sys/compat/linprocfs/linprocfs.c
+++ b/sys/compat/linprocfs/linprocfs.c
@@ -221,6 +221,7 @@ linprocfs_docpuinfo(PFS_FILL_ARGS)
{
int hw_model[2];
char model[128];
+ uint64_t freq;
size_t size;
int class, fqmhz, fqkhz;
int i;
@@ -303,9 +304,10 @@ linprocfs_docpuinfo(PFS_FILL_ARGS)
if (cpu_feature & (1 << i))
sbuf_printf(sb, " %s", flags[i]);
sbuf_cat(sb, "\n");
- if (class >= 5) {
- fqmhz = (tsc_freq + 4999) / 1000000;
- fqkhz = ((tsc_freq + 4999) / 10000) % 100;
+ freq = atomic_load_acq_64(&tsc_freq);
+ if (freq != 0) {
+ fqmhz = (freq + 4999) / 1000000;
+ fqkhz = ((freq + 4999) / 10000) % 100;
sbuf_printf(sb,
"cpu MHz\t\t: %d.%02d\n"
"bogomips\t: %d.%02d\n",
diff --git a/sys/contrib/altq/altq/altq_subr.c b/sys/contrib/altq/altq/altq_subr.c
index 524e752..f5fe990 100644
--- a/sys/contrib/altq/altq/altq_subr.c
+++ b/sys/contrib/altq/altq/altq_subr.c
@@ -929,7 +929,8 @@ init_machclk_setup(void)
#if defined(__amd64__) || defined(__i386__)
/* check if TSC is available */
#ifdef __FreeBSD__
- if ((cpu_feature & CPUID_TSC) == 0 || tsc_freq == 0)
+ if ((cpu_feature & CPUID_TSC) == 0 ||
+ atomic_load_acq_64(&tsc_freq) == 0)
#else
if ((cpu_feature & CPUID_TSC) == 0)
#endif
@@ -964,7 +965,7 @@ init_machclk(void)
*/
#if defined(__amd64__) || defined(__i386__)
#ifdef __FreeBSD__
- machclk_freq = tsc_freq;
+ machclk_freq = atomic_load_acq_64(&tsc_freq);
#elif defined(__NetBSD__)
machclk_freq = (u_int32_t)cpu_tsc_freq;
#elif defined(__OpenBSD__) && (defined(I586_CPU) || defined(I686_CPU))
diff --git a/sys/dev/acpica/acpi_cpu.c b/sys/dev/acpica/acpi_cpu.c
index 85a706f..8cb6858 100644
--- a/sys/dev/acpica/acpi_cpu.c
+++ b/sys/dev/acpica/acpi_cpu.c
@@ -516,7 +516,7 @@ acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
#if defined(__amd64__) || defined(__i386__)
case CPU_IVAR_NOMINAL_MHZ:
if (tsc_is_invariant) {
- *result = (uintptr_t)(tsc_freq / 1000000);
+ *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000);
break;
}
/* FALLTHROUGH */
diff --git a/sys/i386/i386/legacy.c b/sys/i386/i386/legacy.c
index 4d342f5..2136d80 100644
--- a/sys/i386/i386/legacy.c
+++ b/sys/i386/i386/legacy.c
@@ -342,7 +342,8 @@ cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
break;
case CPU_IVAR_NOMINAL_MHZ:
if (tsc_is_invariant) {
- *result = (uintptr_t)(tsc_freq / 1000000);
+ *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) /
+ 1000000);
break;
}
/* FALLTHROUGH */
diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c
index 0c3e8fe..5869c21 100644
--- a/sys/i386/i386/machdep.c
+++ b/sys/i386/i386/machdep.c
@@ -1137,20 +1137,21 @@ int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
register_t reg;
- uint64_t tsc1, tsc2;
+ uint64_t freq, tsc1, tsc2;
if (pcpu_find(cpu_id) == NULL || rate == NULL)
return (EINVAL);
if ((cpu_feature & CPUID_TSC) == 0)
return (EOPNOTSUPP);
+ freq = atomic_load_acq_64(&tsc_freq);
/* If TSC is P-state invariant, DELAY(9) based logic fails. */
- if (tsc_is_invariant && tsc_freq != 0)
+ if (tsc_is_invariant && freq != 0)
return (EOPNOTSUPP);
/* If we're booting, trust the rate calibrated moments ago. */
- if (cold && tsc_freq != 0) {
- *rate = tsc_freq;
+ if (cold && freq != 0) {
+ *rate = freq;
return (0);
}
@@ -1179,7 +1180,7 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
#endif
tsc2 -= tsc1;
- if (tsc_freq != 0) {
+ if (freq != 0) {
*rate = tsc2 * 1000;
return (0);
}
diff --git a/sys/i386/i386/perfmon.c b/sys/i386/i386/perfmon.c
index c61e8fe..3f35b59 100644
--- a/sys/i386/i386/perfmon.c
+++ b/sys/i386/i386/perfmon.c
@@ -336,6 +336,7 @@ perfmon_ioctl(struct cdev *dev, u_long cmd, caddr_t param, int flags, struct thr
struct pmc *pmc;
struct pmc_data *pmcd;
struct pmc_tstamp *pmct;
+ uint64_t freq;
int *ip;
int rv;
@@ -386,13 +387,14 @@ perfmon_ioctl(struct cdev *dev, u_long cmd, caddr_t param, int flags, struct thr
break;
case PMIOTSTAMP:
- if (!tsc_freq) {
+ freq = atomic_load_acq_64(&tsc_freq);
+ if (freq == 0) {
rv = ENOTTY;
break;
}
pmct = (struct pmc_tstamp *)param;
/* XXX interface loses precision. */
- pmct->pmct_rate = tsc_freq / 1000000;
+ pmct->pmct_rate = freq / 1000000;
pmct->pmct_value = rdtsc();
rv = 0;
break;
diff --git a/sys/i386/isa/prof_machdep.c b/sys/i386/isa/prof_machdep.c
index adeddf2..bdcd7e4 100644
--- a/sys/i386/isa/prof_machdep.c
+++ b/sys/i386/isa/prof_machdep.c
@@ -170,8 +170,8 @@ cputime()
{
u_int count;
int delta;
-#if (defined(I586_CPU) || defined(I686_CPU)) && !defined(SMP) && \
- defined(PERFMON) && defined(I586_PMC_GUPROF)
+#if (defined(I586_CPU) || defined(I686_CPU)) && \
+ defined(PERFMON) && defined(I586_PMC_GUPROF) && !defined(SMP)
u_quad_t event_count;
#endif
u_char high, low;
@@ -286,21 +286,23 @@ void
startguprof(gp)
struct gmonparam *gp;
{
- if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) {
- cputime_clock = CPUTIME_CLOCK_I8254;
#if defined(I586_CPU) || defined(I686_CPU)
- if (tsc_freq != 0 && mp_ncpus == 1)
+ uint64_t freq;
+
+ freq = atomic_load_acq_64(&tsc_freq);
+ if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED) {
+ if (freq != 0 && mp_ncpus == 1)
cputime_clock = CPUTIME_CLOCK_TSC;
-#endif
+ else
+ cputime_clock = CPUTIME_CLOCK_I8254;
}
- gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
-#if defined(I586_CPU) || defined(I686_CPU)
if (cputime_clock == CPUTIME_CLOCK_TSC) {
- gp->profrate = tsc_freq >> 1;
+ gp->profrate = freq >> 1;
cputime_prof_active = 1;
- }
+ } else
+ gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
#if defined(PERFMON) && defined(I586_PMC_GUPROF)
- else if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
+ if (cputime_clock == CPUTIME_CLOCK_I586_PMC) {
if (perfmon_avail() &&
perfmon_setup(0, cputime_clock_pmc_conf) == 0) {
if (perfmon_start(0) != 0)
@@ -325,6 +327,10 @@ startguprof(gp)
}
}
#endif /* PERFMON && I586_PMC_GUPROF */
+#else /* !(I586_CPU || I686_CPU) */
+ if (cputime_clock == CPUTIME_CLOCK_UNINITIALIZED)
+ cputime_clock = CPUTIME_CLOCK_I8254;
+ gp->profrate = i8254_freq << CPUTIME_CLOCK_I8254_SHIFT;
#endif /* I586_CPU || I686_CPU */
cputime_bias = 0;
cputime();
diff --git a/sys/pc98/pc98/machdep.c b/sys/pc98/pc98/machdep.c
index 5e7215e..4727351 100644
--- a/sys/pc98/pc98/machdep.c
+++ b/sys/pc98/pc98/machdep.c
@@ -1072,16 +1072,17 @@ int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
register_t reg;
- uint64_t tsc1, tsc2;
+ uint64_t freq, tsc1, tsc2;
if (pcpu_find(cpu_id) == NULL || rate == NULL)
return (EINVAL);
if ((cpu_feature & CPUID_TSC) == 0)
return (EOPNOTSUPP);
+ freq = atomic_load_acq_64(&tsc_freq);
/* If we're booting, trust the rate calibrated moments ago. */
- if (cold && tsc_freq != 0) {
- *rate = tsc_freq;
+ if (cold && freq != 0) {
+ *rate = freq;
return (0);
}
@@ -1110,7 +1111,7 @@ cpu_est_clockrate(int cpu_id, uint64_t *rate)
#endif
tsc2 -= tsc1;
- if (tsc_freq != 0) {
+ if (freq != 0) {
*rate = tsc2 * 1000;
return (0);
}
diff --git a/sys/x86/cpufreq/est.c b/sys/x86/cpufreq/est.c
index 678efb6..cc9502d 100644
--- a/sys/x86/cpufreq/est.c
+++ b/sys/x86/cpufreq/est.c
@@ -1215,7 +1215,7 @@ est_msr_info(device_t dev, uint64_t msr, freq_info **freqs)
return (EOPNOTSUPP);
/* Figure out the bus clock. */
- freq = tsc_freq / 1000000;
+ freq = atomic_load_acq_64(&tsc_freq) / 1000000;
id = msr >> 32;
bus = freq / (id >> 8);
device_printf(dev, "Guessed bus clock (high) of %d MHz\n", bus);
diff --git a/sys/x86/isa/clock.c b/sys/x86/isa/clock.c
index f0016b2..87d906a 100644
--- a/sys/x86/isa/clock.c
+++ b/sys/x86/isa/clock.c
@@ -246,13 +246,13 @@ getit(void)
}
static __inline void
-delay_tsc(int n)
+delay_tsc(int n, uint64_t freq)
{
uint64_t start, end, now;
sched_pin();
start = rdtsc();
- end = start + (tsc_freq * n) / 1000000;
+ end = start + (freq * n) / 1000000;
do {
cpu_spinwait();
now = rdtsc();
@@ -290,6 +290,7 @@ void
DELAY(int n)
{
struct timecounter *tc;
+ uint64_t freq;
int delta, prev_tick, tick, ticks_left;
#ifdef DELAYDEBUG
@@ -298,8 +299,9 @@ DELAY(int n)
static int state = 0;
#endif
- if (tsc_freq != 0) {
- delay_tsc(n);
+ freq = atomic_load_acq_64(&tsc_freq);
+ if (freq != 0) {
+ delay_tsc(n, freq);
return;
}
tc = timecounter;
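
In the clock.c hunk above, delay_tsc() now receives the frequency as a parameter instead of re-reading tsc_freq itself, presumably so the same snapshot that selected the TSC path also computes the spin deadline; a concurrent cpufreq transition can no longer hand the inner loop a different (or zero) value. Caller-side shape, simplified (the real DELAY() keeps its timecounter/i8254 fallback):

	freq = atomic_load_acq_64(&tsc_freq);	/* one snapshot ...            */
	if (freq != 0) {
		delay_tsc(n, freq);		/* ... reused for the deadline */
		return;
	}
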
diff --git a/sys/x86/x86/tsc.c b/sys/x86/x86/tsc.c
index 1781a78..2b1c89a 100644
--- a/sys/x86/x86/tsc.c
+++ b/sys/x86/x86/tsc.c
@@ -245,14 +245,16 @@ tsc_freq_changing(void *arg, const struct cf_level *level, int *status)
static void
tsc_freq_changed(void *arg, const struct cf_level *level, int status)
{
+ uint64_t freq;
/* If there was an error during the transition, don't do anything. */
if (tsc_disabled || status != 0)
return;
/* Total setting for this level gives the new frequency in MHz. */
- tsc_freq = (uint64_t)level->total_set.freq * 1000000;
- tsc_timecounter.tc_frequency = tsc_freq;
+ freq = (uint64_t)level->total_set.freq * 1000000;
+ atomic_store_rel_64(&tsc_freq, freq);
+ atomic_store_rel_64(&tsc_timecounter.tc_frequency, freq);
}
static int
@@ -261,13 +263,13 @@ sysctl_machdep_tsc_freq(SYSCTL_HANDLER_ARGS)
int error;
uint64_t freq;
- if (tsc_timecounter.tc_frequency == 0)
+ freq = atomic_load_acq_64(&tsc_freq);
+ if (freq == 0)
return (EOPNOTSUPP);
- freq = tsc_freq;
error = sysctl_handle_64(oidp, &freq, 0, req);
if (error == 0 && req->newptr != NULL) {
- tsc_freq = freq;
- tsc_timecounter.tc_frequency = tsc_freq;
+ atomic_store_rel_64(&tsc_freq, freq);
+ atomic_store_rel_64(&tsc_timecounter.tc_frequency, freq);
}
return (error);
}
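
The handler above backs the machdep.tsc_freq sysctl; both tsc_freq and the timecounter frequency are now published with release stores, so the acquire-load readers elsewhere in this change observe a consistent value. Typical userland use (the override value here is illustrative):

	# read the calibrated frequency
	sysctl machdep.tsc_freq
	# override it, e.g. after an external calibration
	sysctl machdep.tsc_freq=2660123456
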