author     Peter Zijlstra <peterz@infradead.org>  2015-05-12 10:51:55 +0200
committer  Ingo Molnar <mingo@kernel.org>         2015-05-19 08:32:00 +0200
commit     b92b8b35a2e38bde319fd1d68ec84628c1f1b0fb (patch)
tree       45aea6d12a580b60848363c10dc355f3307ec1ff
parent     ab3f02fc237211f0583c1e7ba3bf504747be9b8d (diff)
locking/arch: Rename set_mb() to smp_store_mb()
Since set_mb() is really about an smp_mb() -- not an IO/DMA barrier like
mb() -- rename it to match the recent smp_load_acquire() and
smp_store_release().

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   Documentation/memory-barriers.txt      6
-rw-r--r--   arch/arm/include/asm/barrier.h         2
-rw-r--r--   arch/arm64/include/asm/barrier.h       2
-rw-r--r--   arch/ia64/include/asm/barrier.h        7
-rw-r--r--   arch/metag/include/asm/barrier.h       2
-rw-r--r--   arch/mips/include/asm/barrier.h        2
-rw-r--r--   arch/powerpc/include/asm/barrier.h     2
-rw-r--r--   arch/s390/include/asm/barrier.h        2
-rw-r--r--   arch/sh/include/asm/barrier.h          2
-rw-r--r--   arch/sparc/include/asm/barrier_64.h    2
-rw-r--r--   arch/x86/include/asm/barrier.h         4
-rw-r--r--   arch/x86/um/asm/barrier.h              3
-rw-r--r--   fs/select.c                            6
-rw-r--r--   include/asm-generic/barrier.h          4
-rw-r--r--   include/linux/sched.h                  8
-rw-r--r--   kernel/futex.c                         2
-rw-r--r--   kernel/locking/qspinlock_paravirt.h    2
-rw-r--r--   kernel/sched/wait.c                    4
18 files changed, 29 insertions, 33 deletions
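
[Editor's note, not part of the patch: the semantics being renamed are
easiest to see in the generic fallback the patch touches in
include/asm-generic/barrier.h -- a plain store followed by a full memory
barrier. A minimal userspace C11 analogue of that shape, where
store_mb_int() is an illustrative name rather than any kernel interface:

#include <stdatomic.h>

/* Analogue of smp_store_mb(var, value): a relaxed store followed by a
 * full fence, mirroring the generic WRITE_ONCE()-then-mb() fallback. */
static inline void store_mb_int(atomic_int *var, int value)
{
	atomic_store_explicit(var, value, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
}

The new name slots the primitive into the same family as
smp_load_acquire() and smp_store_release(): an smp_-prefixed store
combined with an SMP ordering guarantee, rather than the mandatory
IO/DMA-style barrier that plain mb() implies.]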
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index f957461..fe4020e 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -1662,7 +1662,7 @@ CPU from reordering them.
There are some more advanced barrier functions:
- (*) set_mb(var, value)
+ (*) smp_store_mb(var, value)
This assigns the value to the variable and then inserts a full memory
barrier after it, depending on the function. It isn't guaranteed to
@@ -1975,7 +1975,7 @@ after it has altered the task state:
CPU 1
===============================
set_current_state();
- set_mb();
+ smp_store_mb();
STORE current->state
<general barrier>
LOAD event_indicated
@@ -2016,7 +2016,7 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
CPU 1 CPU 2
=============================== ===============================
set_current_state(); STORE event_indicated
- set_mb(); wake_up();
+ smp_store_mb(); wake_up();
STORE current->state <write barrier>
<general barrier> STORE current->state
LOAD event_indicated
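
[Editor's note: a compact userspace C11 analogue of the sleeper/waker
ordering shown in the two hunks above; task_state and event_indicated
are illustrative stand-ins for current->state and the wakeup condition,
sketching the documented ordering rather than real scheduler code:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int task_state;        /* stand-in for current->state */
static atomic_bool event_indicated;  /* stand-in for the wakeup condition */

/* CPU 1: publish the sleeping state, then a full barrier, then test the
 * condition -- the STORE/LOAD pair the <general barrier> keeps ordered. */
static bool sleeper_must_wait(void)
{
	atomic_store_explicit(&task_state, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* smp_store_mb()'s barrier */
	return !atomic_load_explicit(&event_indicated, memory_order_relaxed);
}

/* CPU 2: store the event, then a write barrier, then clear the task
 * state -- mirroring wake_up()'s <write barrier> in the second hunk. */
static void waker(void)
{
	atomic_store_explicit(&event_indicated, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);  /* the wake_up() write barrier */
	atomic_store_explicit(&task_state, 0, memory_order_relaxed);
}

Without the full fence on the sleeper side, the state store and the
condition load could be reordered and the wakeup lost.]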
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 993150a..6c2327e 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -81,7 +81,7 @@ do { \
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index ff7de78..0fa47c4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -114,7 +114,7 @@ do { \
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
-#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define nop() asm volatile("nop");
#define smp_mb__before_atomic() smp_mb()
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 03117e7..843ba43 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -77,12 +77,7 @@ do { \
___p1; \
})
-/*
- * XXX check on this ---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet. Grrr...
- */
-#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
/*
* The group barrier in front of the rsm & ssm are necessary to ensure
diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 97eb018..5a696e5 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -84,7 +84,7 @@ static inline void fence(void)
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
-#define set_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define smp_store_release(p, v) \
do { \
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index cff1bbd..7ecba84 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -112,7 +112,7 @@
#define __WEAK_LLSC_MB " \n"
#endif
-#define set_mb(var, value) \
+#define smp_store_mb(var, value) \
do { WRITE_ONCE(var, value); smp_mb(); } while (0)
#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 2a072e4..39505d6 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
#define rmb() __asm__ __volatile__ ("sync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
-#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#ifdef __SUBARCH_HAS_LWSYNC
# define SMPWMB LWSYNC
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index b66cd53..e6f8615 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
#define smp_mb__before_atomic() smp_mb()
#define smp_mb__after_atomic() smp_mb()
-#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#define smp_store_release(p, v) \
do { \
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 4371530..bf91037 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -32,7 +32,7 @@
#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#endif
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#include <asm-generic/barrier.h>
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 125fec7..809941e 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -40,7 +40,7 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \
#define dma_rmb() rmb()
#define dma_wmb() wmb()
-#define set_mb(__var, __value) \
+#define smp_store_mb(__var, __value) \
do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
#ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 9de5cde..e51a8f8 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -35,12 +35,12 @@
#define smp_mb() mb()
#define smp_rmb() dma_rmb()
#define smp_wmb() barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif /* SMP */
#define read_barrier_depends() do { } while (0)
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index cc0cb01..b9531d3 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -39,7 +39,8 @@
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
-#define set_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
+
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#define read_barrier_depends() do { } while (0)
#define smp_read_barrier_depends() do { } while (0)
diff --git a/fs/select.c b/fs/select.c
index f684c75..0155473 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -189,7 +189,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
* doesn't imply write barrier and the users expect write
* barrier semantics on wakeup functions. The following
* smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
- * and is paired with set_mb() in poll_schedule_timeout.
+ * and is paired with smp_store_mb() in poll_schedule_timeout.
*/
smp_wmb();
pwq->triggered = 1;
@@ -244,7 +244,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
/*
* Prepare for the next iteration.
*
- * The following set_mb() serves two purposes. First, it's
+ * The following smp_store_mb() serves two purposes. First, it's
* the counterpart rmb of the wmb in pollwake() such that data
* written before wake up is always visible after wake up.
* Second, the full barrier guarantees that triggered clearing
@@ -252,7 +252,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
* this problem doesn't exist for the first iteration as
* add_wait_queue() has full barrier semantics.
*/
- set_mb(pwq->triggered, 0);
+ smp_store_mb(pwq->triggered, 0);
return rc;
}
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 3938716..e6a83d7 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -66,8 +66,8 @@
#define smp_read_barrier_depends() do { } while (0)
#endif
-#ifndef set_mb
-#define set_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#ifndef smp_store_mb
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
#endif
#ifndef smp_mb__before_atomic
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 26a2e61..18f1972 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -252,7 +252,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
- set_mb((tsk)->state, (state_value)); \
+ smp_store_mb((tsk)->state, (state_value)); \
} while (0)
/*
@@ -274,7 +274,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
- set_mb(current->state, (state_value)); \
+ smp_store_mb(current->state, (state_value)); \
} while (0)
#else
@@ -282,7 +282,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
- set_mb((tsk)->state, (state_value))
+ smp_store_mb((tsk)->state, (state_value))
/*
* set_current_state() includes a barrier so that the write of current->state
@@ -298,7 +298,7 @@ extern char ___assert_task_state[1 - 2*!!(
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
- set_mb(current->state, (state_value))
+ smp_store_mb(current->state, (state_value))
#endif
diff --git a/kernel/futex.c b/kernel/futex.c
index 2579e40..55ca63ad9 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2055,7 +2055,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
{
/*
* The task state is guaranteed to be set before another task can
- * wake it. set_current_state() is implemented using set_mb() and
+ * wake it. set_current_state() is implemented using smp_store_mb() and
* queue_me() calls spin_unlock() upon completion, both serializing
* access to the hash list and forcing another memory barrier.
*/
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 27ab96d..04ab181 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -175,7 +175,7 @@ static void pv_wait_node(struct mcs_spinlock *node)
*
* Matches the xchg() from pv_kick_node().
*/
- set_mb(pn->state, vcpu_halted);
+ smp_store_mb(pn->state, vcpu_halted);
if (!READ_ONCE(node->locked))
pv_wait(&pn->state, vcpu_halted);
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 852143a..9bc8232 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -341,7 +341,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
* condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
* an event.
*/
- set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
+ smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
return timeout;
}
@@ -354,7 +354,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
* doesn't imply write barrier and the users expects write
* barrier semantics on wakeup functions. The following
* smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
- * and is paired with set_mb() in wait_woken().
+ * and is paired with smp_store_mb() in wait_woken().
*/
smp_wmb(); /* C */
wait->flags |= WQ_FLAG_WOKEN;
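
[Editor's note: the wait.c hunks above are the same pairing in flag
form -- wait_woken() clears WQ_FLAG_WOKEN with a full barrier (B) before
re-checking the condition, while woken_wake_function() issues a write
barrier (C) before setting it. A userspace C11 analogue of that
handshake; the flag value and helper names are illustrative:

#include <stdatomic.h>

#define WOKEN 0x02  /* illustrative stand-in for WQ_FLAG_WOKEN */

static atomic_uint wait_flags;

/* wait_woken() side (B): clear the flag, then a full fence, so a wakeup
 * racing with the clear is still seen on the next condition check. */
static void clear_woken(void)
{
	unsigned int f = atomic_load_explicit(&wait_flags, memory_order_relaxed);
	atomic_store_explicit(&wait_flags, f & ~WOKEN, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);  /* smp_store_mb()'s barrier */
}

/* woken_wake_function() side (C): write barrier first, then set the
 * flag, so data written before the wakeup is visible to the waiter. */
static void mark_woken(void)
{
	atomic_thread_fence(memory_order_release);  /* the smp_wmb() */
	atomic_fetch_or_explicit(&wait_flags, WOKEN, memory_order_relaxed);
}
]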