summary | refs | log | tree | commit | diff | stats
path: root/sys/sparc64
diff options
context:
space:
mode:
authormarius <marius@FreeBSD.org>2015-12-27 17:58:58 +0000
committermarius <marius@FreeBSD.org>2015-12-27 17:58:58 +0000
commit111ac5b2caecac1ba63a4734ab7ce72ebe84dd1a (patch)
tree319b88bacb0384e14e52bd10199ae631704cc1a2 /sys/sparc64
parent8eba6000110bc8f94779df2e6697239d645d3986 (diff)
downloadFreeBSD-src-111ac5b2caecac1ba63a4734ab7ce72ebe84dd1a.zip
FreeBSD-src-111ac5b2caecac1ba63a4734ab7ce72ebe84dd1a.tar.gz
MFC: r287728
Merge r286374 from x86: Formally pair store_rel(&smp_started) with load_acq(&smp_started). Similarly to x86, this change is mostly a NOP due to the kernel being run in total store order.
Diffstat (limited to 'sys/sparc64')
-rw-r--r-- sys/sparc64/include/smp.h | 20
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/sys/sparc64/include/smp.h b/sys/sparc64/include/smp.h
index c46c4f8..266187e 100644
--- a/sys/sparc64/include/smp.h
+++ b/sys/sparc64/include/smp.h
@@ -47,6 +47,7 @@
#include <sys/sched.h>
#include <sys/smp.h>
+#include <machine/atomic.h>
#include <machine/intr_machdep.h>
#include <machine/tte.h>
@@ -143,7 +144,7 @@ ipi_all_but_self(u_int ipi)
{
cpuset_t cpus;
- if (__predict_false(smp_started == 0))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
return;
cpus = all_cpus;
sched_pin();
@@ -158,7 +159,8 @@ static __inline void
ipi_selected(cpuset_t cpus, u_int ipi)
{
- if (__predict_false(smp_started == 0 || CPU_EMPTY(&cpus)))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0 ||
+ CPU_EMPTY(&cpus)))
return;
mtx_lock_spin(&ipi_mtx);
cpu_ipi_selected(cpus, 0, (u_long)tl_ipi_level, ipi);
@@ -169,7 +171,7 @@ static __inline void
ipi_cpu(int cpu, u_int ipi)
{
- if (__predict_false(smp_started == 0))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
return;
mtx_lock_spin(&ipi_mtx);
cpu_ipi_single(cpu, 0, (u_long)tl_ipi_level, ipi);
@@ -183,7 +185,7 @@ ipi_dcache_page_inval(void *func, vm_paddr_t pa)
{
struct ipi_cache_args *ica;
- if (__predict_false(smp_started == 0))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
return (NULL);
sched_pin();
ica = &ipi_cache_args;
@@ -200,7 +202,7 @@ ipi_icache_page_inval(void *func, vm_paddr_t pa)
{
struct ipi_cache_args *ica;
- if (__predict_false(smp_started == 0))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
return (NULL);
sched_pin();
ica = &ipi_cache_args;
@@ -217,7 +219,7 @@ ipi_rd(u_int cpu, void *func, u_long *val)
{
struct ipi_rd_args *ira;
- if (__predict_false(smp_started == 0))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
return (NULL);
sched_pin();
ira = &ipi_rd_args;
@@ -234,7 +236,7 @@ ipi_tlb_context_demap(struct pmap *pm)
struct ipi_tlb_args *ita;
cpuset_t cpus;
- if (__predict_false(smp_started == 0))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
return (NULL);
sched_pin();
cpus = pm->pm_active;
@@ -259,7 +261,7 @@ ipi_tlb_page_demap(struct pmap *pm, vm_offset_t va)
struct ipi_tlb_args *ita;
cpuset_t cpus;
- if (__predict_false(smp_started == 0))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
return (NULL);
sched_pin();
cpus = pm->pm_active;
@@ -284,7 +286,7 @@ ipi_tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
struct ipi_tlb_args *ita;
cpuset_t cpus;
- if (__predict_false(smp_started == 0))
+ if (__predict_false(atomic_load_acq_int(&smp_started) == 0))
return (NULL);
sched_pin();
cpus = pm->pm_active;
OpenPOWER on IntegriCloud