From 5a1b26d7c629915446222ebe77d16567c98426ff Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Thu, 31 Dec 2015 12:09:13 +0200
Subject: locking/barriers, arch: Use smp barriers in smp_store_release()

With commit b92b8b35a2e ("locking/arch: Rename set_mb() to smp_store_mb()")
it was made clear that the context of this call (and thus set_mb) is strictly
for CPU ordering, as opposed to IO. As such all archs should use the smp
variant of mb(), respecting the semantics and saving a mandatory barrier on UP.

Signed-off-by: Davidlohr Bueso
Signed-off-by: Peter Zijlstra (Intel)
Cc:
Cc: Andrew Morton
Cc: Benjamin Herrenschmidt
Cc: Heiko Carstens
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: Tony Luck
Cc: dave@stgolabs.net
Link: http://lkml.kernel.org/r/1445975631-17047-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar
Reviewed-by: Paul E. McKenney
---
 arch/ia64/include/asm/barrier.h    | 2 +-
 arch/powerpc/include/asm/barrier.h | 2 +-
 arch/s390/include/asm/barrier.h    | 2 +-
 include/asm-generic/barrier.h      | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index df896a1..209c4b8 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -77,7 +77,7 @@ do {							\
 	___p1;						\
 })
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 0eca6ef..a7af5fb 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d68e11e..7ffd0b1 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic()	smp_mb()
 #define smp_mb__after_atomic()	smp_mb()
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v)				\
 do {							\
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index b42afad..0f45f93 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -93,7 +93,7 @@
 #endif	/* CONFIG_SMP */
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
-- cgit v1.1


From 57f7c0370f386d5e0960e25d2c3ceb0b8e8c489d Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Sun, 27 Dec 2015 14:10:52 +0200
Subject: asm-generic: guard smp_store_release/load_acquire

Allow architectures to override smp_store_release and smp_load_acquire
by guarding the defines in asm-generic/barrier.h with ifndef directives.
This is in preparation to reusing asm-generic/barrier.h on architectures
which have their own definition of these macros.
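
An illustration of the override mechanism this enables (a sketch using a
hypothetical architecture; not code from the series): an arch header defines
its macro before pulling in the generic file, and the new #ifndef then skips
the generic fallback:

	/* arch/foo/include/asm/barrier.h -- hypothetical example */
	#define smp_store_release(p, v)				\
	do {							\
		compiletime_assert_atomic_type(*p);		\
		arch_release_fence();	/* assumed arch primitive */ \
		WRITE_ONCE(*p, v);				\
	} while (0)

	#include <asm-generic/barrier.h> /* its #ifndef sees the arch version */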
Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- include/asm-generic/barrier.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 0f45f93..987b2e0 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -104,13 +104,16 @@ #define smp_mb__after_atomic() smp_mb() #endif +#ifndef smp_store_release #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ smp_mb(); \ WRITE_ONCE(*p, v); \ } while (0) +#endif +#ifndef smp_load_acquire #define smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ @@ -118,6 +121,7 @@ do { \ smp_mb(); \ ___p1; \ }) +#endif #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ -- cgit v1.1 From 9505ec0825a09ea97426d026f2524d1cefa83a84 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 28 Dec 2015 13:58:06 +0200 Subject: ia64: rename nop->iosapic_nop asm-generic/barrier.h defines a nop() macro. To be able to use this header on ia64, we shouldn't call local functions/variables nop(). There's one instance where this breaks on ia64: rename the function to iosapic_nop to avoid the conflict. Signed-off-by: Michael S. Tsirkin Acked-by: Tony Luck Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/ia64/kernel/iosapic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index d2fae05..90fde5b 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -256,7 +256,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask) } static void -nop (struct irq_data *data) +iosapic_nop (struct irq_data *data) { /* do nothing... */ } @@ -415,7 +415,7 @@ iosapic_unmask_level_irq (struct irq_data *data) #define iosapic_shutdown_level_irq mask_irq #define iosapic_enable_level_irq unmask_irq #define iosapic_disable_level_irq mask_irq -#define iosapic_ack_level_irq nop +#define iosapic_ack_level_irq iosapic_nop static struct irq_chip irq_type_iosapic_level = { .name = "IO-SAPIC-level", @@ -453,7 +453,7 @@ iosapic_ack_edge_irq (struct irq_data *data) } #define iosapic_enable_edge_irq unmask_irq -#define iosapic_disable_edge_irq nop +#define iosapic_disable_edge_irq iosapic_nop static struct irq_chip irq_type_iosapic_edge = { .name = "IO-SAPIC-edge", -- cgit v1.1 From 53a05ac15ee04b56ce02f0f831556e2fcdcce93f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 21 Dec 2015 09:22:18 +0200 Subject: ia64: reuse asm-generic/barrier.h On ia64 smp_rmb, smp_wmb, read_barrier_depends, smp_read_barrier_depends and smp_store_mb() match the asm-generic variants exactly. Drop the local definitions and pull in asm-generic/barrier.h instead. This is in preparation to refactoring this code area. Signed-off-by: Michael S. 
Tsirkin
Acked-by: Tony Luck
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
---
 arch/ia64/include/asm/barrier.h | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 209c4b8..2f93348 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -48,12 +48,6 @@
 # define smp_mb()	barrier()
 #endif
 
-#define smp_rmb()	smp_mb()
-#define smp_wmb()	smp_mb()
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
 #define smp_mb__before_atomic()	barrier()
 #define smp_mb__after_atomic()	barrier()
 
@@ -77,12 +71,12 @@ do {							\
 	___p1;						\
 })
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
  * that none of the previous instructions in the same group are
  * affected by the rsm/ssm.
  */
+
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_IA64_BARRIER_H */
-- cgit v1.1


From fbd7ec02363cee4264aca2cc46692c9322fd6b42 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: powerpc: reuse asm-generic/barrier.h

On powerpc read_barrier_depends, smp_read_barrier_depends, smp_store_mb(),
smp_mb__before_atomic and smp_mb__after_atomic match the asm-generic
variants exactly. Drop the local definitions and pull in
asm-generic/barrier.h instead.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
Reviewed-by: Paul E. McKenney
---
 arch/powerpc/include/asm/barrier.h | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index a7af5fb..980ad0c 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -34,8 +34,6 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
 #else
@@ -60,9 +58,6 @@
 #define smp_wmb()	barrier()
 #endif /* CONFIG_SMP */
 
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
 /*
  * This is a barrier which prevents following instructions from being
  * started until the value of the argument x is known. For example, if
 *
@@ -87,8 +82,8 @@ do {							\
 	___p1;						\
 })
 
-#define smp_mb__before_atomic()     smp_mb()
-#define smp_mb__after_atomic()      smp_mb()
 #define smp_mb__before_spinlock()	smp_mb()
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_POWERPC_BARRIER_H */
-- cgit v1.1


From 21535aaed9e33b4cc485ab2245dd2958816ee916 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: s390: reuse asm-generic/barrier.h

On s390 read_barrier_depends, smp_read_barrier_depends, smp_store_mb(),
smp_mb__before_atomic and smp_mb__after_atomic match the asm-generic
variants exactly. Drop the local definitions and pull in
asm-generic/barrier.h instead.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
---
 arch/s390/include/asm/barrier.h | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index 7ffd0b1..c358c31 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -30,14 +30,6 @@
 #define smp_rmb()	rmb()
 #define smp_wmb()	wmb()
 
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #define smp_store_release(p, v)				\
 do {							\
 	compiletime_assert_atomic_type(*p);		\
@@ -53,4 +45,6 @@ do {							\
 	___p1;						\
 })
 
+#include <asm-generic/barrier.h>
+
 #endif /* __ASM_BARRIER_H */
-- cgit v1.1


From 519be0438e6963b8efd2430e9d0595f5aeee915e Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: sparc: reuse asm-generic/barrier.h

On sparc 64 bit dma_rmb, dma_wmb, smp_store_mb, smp_mb, smp_rmb, smp_wmb,
read_barrier_depends and smp_read_barrier_depends match the asm-generic
variants exactly. Drop the local definitions and pull in
asm-generic/barrier.h instead.

nop uses __asm__ __volatile but is otherwise identical to the generic
version, drop that as well.

This is in preparation to refactoring this code area.

Note: nop() was in processor.h and not in barrier.h as on other
architectures. Nothing seems to depend on it being there though.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: David S. Miller
Acked-by: Peter Zijlstra (Intel)
---
 arch/sparc/include/asm/barrier_32.h |  1 -
 arch/sparc/include/asm/barrier_64.h | 21 ++-------------------
 arch/sparc/include/asm/processor.h  |  3 ---
 3 files changed, 2 insertions(+), 23 deletions(-)

diff --git a/arch/sparc/include/asm/barrier_32.h b/arch/sparc/include/asm/barrier_32.h
index ae69eda..8059130 100644
--- a/arch/sparc/include/asm/barrier_32.h
+++ b/arch/sparc/include/asm/barrier_32.h
@@ -1,7 +1,6 @@
 #ifndef __SPARC_BARRIER_H
 #define __SPARC_BARRIER_H
 
-#include <asm/processor.h> /* for nop() */
 #include <asm-generic/barrier.h>
 
 #endif /* !(__SPARC_BARRIER_H) */
diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h
index 14a9286..26c3f72 100644
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -37,25 +37,6 @@ do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
 #define rmb()	__asm__ __volatile__("":::"memory")
 #define wmb()	__asm__ __volatile__("":::"memory")
 
-#define dma_rmb()	rmb()
-#define dma_wmb()	wmb()
-
-#define smp_store_mb(__var, __value) \
-	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	rmb()
-#define smp_wmb()	wmb()
-#else
-#define smp_mb()	__asm__ __volatile__("":::"memory")
-#define smp_rmb()	__asm__ __volatile__("":::"memory")
-#define smp_wmb()	__asm__ __volatile__("":::"memory")
-#endif
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
 #define smp_store_release(p, v)				\
 do {							\
 	compiletime_assert_atomic_type(*p);		\
@@ -74,4 +55,6 @@ do {							\
 #define smp_mb__before_atomic()	barrier()
 #define smp_mb__after_atomic()	barrier()
 
+#include <asm-generic/barrier.h>
+
 #endif /* !(__SPARC64_BARRIER_H) */
diff --git a/arch/sparc/include/asm/processor.h b/arch/sparc/include/asm/processor.h
index 2fe99e6..9da9646 100644
--- a/arch/sparc/include/asm/processor.h
+++ b/arch/sparc/include/asm/processor.h
@@ -5,7 +5,4 @@
 #else
 #include <asm/processor_32.h>
 #endif
-
-#define nop() 		__asm__ __volatile__ ("nop")
-
 #endif
-- cgit v1.1


From 335390d6096f647311980f50312b304b377e616f Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: arm: reuse asm-generic/barrier.h

On arm smp_store_mb, read_barrier_depends, smp_read_barrier_depends,
smp_store_release, smp_load_acquire, smp_mb__before_atomic and
smp_mb__after_atomic match the asm-generic variants exactly. Drop the
local definitions and pull in asm-generic/barrier.h instead.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Russell King
Acked-by: Peter Zijlstra (Intel)
---
 arch/arm/include/asm/barrier.h | 23 +----------------------
 1 file changed, 1 insertion(+), 22 deletions(-)

diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 3ff5642..31152e8 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -70,28 +70,7 @@ extern void arm_heavy_mb(void);
 #define smp_wmb()	dmb(ishst)
 #endif
 
-#define smp_store_release(p, v)				\
-do {							\
-	compiletime_assert_atomic_type(*p);		\
-	smp_mb();					\
-	WRITE_ONCE(*p, v);				\
-} while (0)
-
-#define smp_load_acquire(p)				\
-({							\
-	typeof(*p) ___p1 = READ_ONCE(*p);		\
-	compiletime_assert_atomic_type(*p);		\
-	smp_mb();					\
-	___p1;						\
-})
-
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
+#include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_BARRIER_H */
-- cgit v1.1


From 90ff6a17d0e07d689886cba4244674bfd41e7a2d Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: arm64: reuse asm-generic/barrier.h

On arm64 nop, read_barrier_depends, smp_read_barrier_depends,
smp_store_mb(), smp_mb__before_atomic and smp_mb__after_atomic match the
asm-generic variants exactly. Drop the local definitions and pull in
asm-generic/barrier.h instead.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
---
 arch/arm64/include/asm/barrier.h | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 9622eb4..91a43f4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -91,14 +91,7 @@ do {							\
 	__u.__val;					\
 })
 
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-#define nop()		asm volatile("nop");
-
-#define smp_mb__before_atomic()	smp_mb()
-#define smp_mb__after_atomic()	smp_mb()
+#include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
-- cgit v1.1


From abe114d9f0a80f27bc5040cd2287dca80423d13e Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: metag: reuse asm-generic/barrier.h

On metag dma_rmb, dma_wmb, smp_store_mb, read_barrier_depends,
smp_read_barrier_depends, smp_store_release and smp_load_acquire match
the asm-generic variants exactly. Drop the local definitions and pull in
asm-generic/barrier.h instead.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S.
Tsirkin
Acked-by: Arnd Bergmann
Acked-by: James Hogan
Acked-by: Peter Zijlstra (Intel)
---
 arch/metag/include/asm/barrier.h | 25 ++-----------------------
 1 file changed, 2 insertions(+), 23 deletions(-)

diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h
index 172b7e5..b5b778b 100644
--- a/arch/metag/include/asm/barrier.h
+++ b/arch/metag/include/asm/barrier.h
@@ -44,9 +44,6 @@ static inline void wr_fence(void)
 #define rmb()		barrier()
 #define wmb()		mb()
 
-#define dma_rmb()	rmb()
-#define dma_wmb()	wmb()
-
 #ifndef CONFIG_SMP
 #define fence()		do { } while (0)
 #define smp_mb()	barrier()
@@ -81,27 +78,9 @@ static inline void fence(void)
 #endif
 #endif
 
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
-#define smp_store_release(p, v)				\
-do {							\
-	compiletime_assert_atomic_type(*p);		\
-	smp_mb();					\
-	WRITE_ONCE(*p, v);				\
-} while (0)
-
-#define smp_load_acquire(p)				\
-({							\
-	typeof(*p) ___p1 = READ_ONCE(*p);		\
-	compiletime_assert_atomic_type(*p);		\
-	smp_mb();					\
-	___p1;						\
-})
-
 #define smp_mb__before_atomic()	barrier()
 #define smp_mb__after_atomic()	barrier()
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_METAG_BARRIER_H */
-- cgit v1.1


From fa083e28f89a78b95ba8b7da86db40c13c60e95d Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: mips: reuse asm-generic/barrier.h

On mips dma_rmb, dma_wmb, smp_store_mb, read_barrier_depends,
smp_read_barrier_depends, smp_store_release and smp_load_acquire match
the asm-generic variants exactly. Drop the local definitions and pull in
asm-generic/barrier.h instead.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
---
 arch/mips/include/asm/barrier.h | 25 ++-----------------------
 1 file changed, 2 insertions(+), 23 deletions(-)

diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index 752e0b8..3eac4b9 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -10,9 +10,6 @@
 
 #include <asm/addrspace.h>
 
-#define read_barrier_depends()		do { } while(0)
-#define smp_read_barrier_depends()	do { } while(0)
-
 #ifdef CONFIG_CPU_HAS_SYNC
 #define __sync()				\
	__asm__ __volatile__(			\
@@ -87,8 +84,6 @@
 #define wmb()		fast_wmb()
 #define rmb()		fast_rmb()
-#define dma_wmb()	fast_wmb()
-#define dma_rmb()	fast_rmb()
 
 #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
 # ifdef CONFIG_CPU_CAVIUM_OCTEON
@@ -112,9 +107,6 @@
 #define __WEAK_LLSC_MB		"		\n"
 #endif
 
-#define smp_store_mb(var, value) \
-	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 #define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
@@ -129,22 +121,9 @@
 #define nudge_writes() mb()
 #endif
 
-#define smp_store_release(p, v)				\
-do {							\
-	compiletime_assert_atomic_type(*p);		\
-	smp_mb();					\
-	WRITE_ONCE(*p, v);				\
-} while (0)
-
-#define smp_load_acquire(p)				\
-({							\
-	typeof(*p) ___p1 = READ_ONCE(*p);		\
-	compiletime_assert_atomic_type(*p);		\
-	smp_mb();					\
-	___p1;						\
-})
-
 #define smp_mb__before_atomic()	smp_mb__before_llsc()
 #define smp_mb__after_atomic()	smp_llsc_mb()
 
+#include <asm-generic/barrier.h>
+
 #endif /* __ASM_BARRIER_H */
-- cgit v1.1


From 577f183acc88645eae116326cc2203dc88ea730c Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: x86/um: reuse asm-generic/barrier.h

On x86/um CONFIG_SMP is never defined.
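
(Background, not part of the commit: on a UP-only target such as UML the
smp_* macros only have to stop the compiler, not the CPU, since a
uniprocessor always appears to execute its own accesses in program order.
The kernel's compiler-only barrier is essentially:

	#define barrier() __asm__ __volatile__("" : : : "memory")

i.e. no fence instruction at all, just a block on compiler reordering.)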
As a result, several macros match the asm-generic variant exactly. Drop the
local definitions and pull in asm-generic/barrier.h instead.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Richard Weinberger
Acked-by: Peter Zijlstra (Intel)
---
 arch/x86/um/asm/barrier.h | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 755481f..174781a 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -36,13 +36,6 @@
 #endif /* CONFIG_X86_PPRO_FENCE */
 #define dma_wmb()	barrier()
 
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
+#include <asm-generic/barrier.h>
 
 #endif
-- cgit v1.1


From 300b06d4555305dc227748674f75970f2f84c224 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Mon, 21 Dec 2015 09:22:18 +0200
Subject: x86: reuse asm-generic/barrier.h

As on most architectures, on x86 read_barrier_depends and
smp_read_barrier_depends are empty. Drop the local definitions and pull
the generic ones from asm-generic/barrier.h instead: they are identical.

This is in preparation to refactoring this code area.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
---
 arch/x86/include/asm/barrier.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 0681d25..cc4c2a7 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -43,9 +43,6 @@
 #define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif /* SMP */
 
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
 #if defined(CONFIG_X86_PPRO_FENCE)
 
 /*
@@ -91,4 +88,6 @@ do {							\
 #define smp_mb__before_atomic()	barrier()
 #define smp_mb__after_atomic()	barrier()
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_X86_BARRIER_H */
-- cgit v1.1


From a9e4252a9b147043142282ebb65da94dcb951e2a Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Sun, 27 Dec 2015 15:04:42 +0200
Subject: asm-generic: add __smp_xxx wrappers

On !SMP, most architectures define their barriers as compiler barriers.
On SMP, most need an actual barrier.

Make it possible to remove the code duplication for !SMP by defining
low-level __smp_xxx barriers which do not depend on the value of SMP,
then use them from asm-generic conditionally.

Besides reducing code duplication, these low level APIs will also be
useful for virtualization, where a barrier is sometimes needed even if
!SMP since we might be talking to another kernel on the same SMP system.

Both virtio and Xen drivers will benefit.

The smp_xxx variants should use __smp_XXX ones or barrier() depending on
SMP, identically for all architectures.

We keep ifndef guards around them for now - once/if all architectures
are converted to use the generic code, we'll be able to remove these.

Suggested-by: Peter Zijlstra
Signed-off-by: Michael S.
Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- include/asm-generic/barrier.h | 91 ++++++++++++++++++++++++++++++++++++++----- 1 file changed, 82 insertions(+), 9 deletions(-) diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 987b2e0..8752964 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -54,22 +54,38 @@ #define read_barrier_depends() do { } while (0) #endif +#ifndef __smp_mb +#define __smp_mb() mb() +#endif + +#ifndef __smp_rmb +#define __smp_rmb() rmb() +#endif + +#ifndef __smp_wmb +#define __smp_wmb() wmb() +#endif + +#ifndef __smp_read_barrier_depends +#define __smp_read_barrier_depends() read_barrier_depends() +#endif + #ifdef CONFIG_SMP #ifndef smp_mb -#define smp_mb() mb() +#define smp_mb() __smp_mb() #endif #ifndef smp_rmb -#define smp_rmb() rmb() +#define smp_rmb() __smp_rmb() #endif #ifndef smp_wmb -#define smp_wmb() wmb() +#define smp_wmb() __smp_wmb() #endif #ifndef smp_read_barrier_depends -#define smp_read_barrier_depends() read_barrier_depends() +#define smp_read_barrier_depends() __smp_read_barrier_depends() #endif #else /* !CONFIG_SMP */ @@ -92,23 +108,78 @@ #endif /* CONFIG_SMP */ +#ifndef __smp_store_mb +#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0) +#endif + +#ifndef __smp_mb__before_atomic +#define __smp_mb__before_atomic() __smp_mb() +#endif + +#ifndef __smp_mb__after_atomic +#define __smp_mb__after_atomic() __smp_mb() +#endif + +#ifndef __smp_store_release +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __smp_mb(); \ + WRITE_ONCE(*p, v); \ +} while (0) +#endif + +#ifndef __smp_load_acquire +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + __smp_mb(); \ + ___p1; \ +}) +#endif + +#ifdef CONFIG_SMP + +#ifndef smp_store_mb +#define smp_store_mb(var, value) __smp_store_mb(var, value) +#endif + +#ifndef smp_mb__before_atomic +#define smp_mb__before_atomic() __smp_mb__before_atomic() +#endif + +#ifndef smp_mb__after_atomic +#define smp_mb__after_atomic() __smp_mb__after_atomic() +#endif + +#ifndef smp_store_release +#define smp_store_release(p, v) __smp_store_release(p, v) +#endif + +#ifndef smp_load_acquire +#define smp_load_acquire(p) __smp_load_acquire(p) +#endif + +#else /* !CONFIG_SMP */ + #ifndef smp_store_mb -#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0) +#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0) #endif #ifndef smp_mb__before_atomic -#define smp_mb__before_atomic() smp_mb() +#define smp_mb__before_atomic() barrier() #endif #ifndef smp_mb__after_atomic -#define smp_mb__after_atomic() smp_mb() +#define smp_mb__after_atomic() barrier() #endif #ifndef smp_store_release #define smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ - smp_mb(); \ + barrier(); \ WRITE_ONCE(*p, v); \ } while (0) #endif @@ -118,10 +189,12 @@ do { \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ - smp_mb(); \ + barrier(); \ ___p1; \ }) #endif +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ -- cgit v1.1 From 003472a93ad019bfd054a5cbb30c6eec7d0395a3 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: powerpc: define __smp_xxx This defines __smp_xxx barriers for powerpc for use by virtualization. 
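
(An illustrative summary of the layering the asm-generic patch above
establishes; not part of any commit:

	/* arch code supplies __smp_mb(); asm-generic picks per CONFIG_SMP */
	smp_mb()  ->  __smp_mb()    /* CONFIG_SMP=y: real fence     */
	smp_mb()  ->  barrier()     /* CONFIG_SMP=n: compiler fence */

__smp_mb() itself never depends on CONFIG_SMP, which is what makes it usable
for the virtio/Xen case of a UP kernel sharing memory with another kernel on
the same SMP machine.)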
smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h This reduces the amount of arch-specific boiler-plate code. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Boqun Feng Acked-by: Peter Zijlstra (Intel) --- arch/powerpc/include/asm/barrier.h | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h index 980ad0c..c0deafc 100644 --- a/arch/powerpc/include/asm/barrier.h +++ b/arch/powerpc/include/asm/barrier.h @@ -44,19 +44,11 @@ #define dma_rmb() __lwsync() #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") -#ifdef CONFIG_SMP -#define smp_lwsync() __lwsync() +#define __smp_lwsync() __lwsync() -#define smp_mb() mb() -#define smp_rmb() __lwsync() -#define smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") -#else -#define smp_lwsync() barrier() - -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#endif /* CONFIG_SMP */ +#define __smp_mb() mb() +#define __smp_rmb() __lwsync() +#define __smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") /* * This is a barrier which prevents following instructions from being @@ -67,18 +59,18 @@ #define data_barrier(x) \ asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ - smp_lwsync(); \ + __smp_lwsync(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ - smp_lwsync(); \ + __smp_lwsync(); \ ___p1; \ }) -- cgit v1.1 From fd072df850e536bf46e9981be4be95961ce5eef3 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: arm64: define __smp_xxx This defines __smp_xxx barriers for arm64, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: arm64 does not support !SMP config, so smp_xxx and __smp_xxx are always equivalent. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/arm64/include/asm/barrier.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 91a43f4..dae5c49 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -35,11 +35,11 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) -#define smp_mb() dmb(ish) -#define smp_rmb() dmb(ishld) -#define smp_wmb() dmb(ishst) +#define __smp_mb() dmb(ish) +#define __smp_rmb() dmb(ishld) +#define __smp_wmb() dmb(ishst) -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ switch (sizeof(*p)) { \ @@ -62,7 +62,7 @@ do { \ } \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ union { typeof(*p) __val; char __c[1]; } __u; \ compiletime_assert_atomic_type(*p); \ -- cgit v1.1 From 2b1f3de10267dd1034d24f9e77dd5e8f07793925 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: arm: define __smp_xxx This defines __smp_xxx barriers for arm, for use by virtualization. 
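
(A usage sketch of the acquire/release pairing these arch hooks implement;
hypothetical data/flag variables, not taken from the patches:

	/* CPU 0: producer */
	data = 42;				/* plain store */
	smp_store_release(&flag, 1);		/* data ordered before flag */

	/* CPU 1: consumer */
	if (smp_load_acquire(&flag))		/* flag ordered before data */
		r = data;			/* guaranteed to observe 42 */

On powerpc both halves use lwsync; on arm64 the store side compiles to stlr
and the load side to ldar.)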
smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h This reduces the amount of arch-specific boiler-plate code. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Russell King Acked-by: Peter Zijlstra (Intel) --- arch/arm/include/asm/barrier.h | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h index 31152e8..112cc1a 100644 --- a/arch/arm/include/asm/barrier.h +++ b/arch/arm/include/asm/barrier.h @@ -60,15 +60,9 @@ extern void arm_heavy_mb(void); #define dma_wmb() barrier() #endif -#ifndef CONFIG_SMP -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#else -#define smp_mb() dmb(ish) -#define smp_rmb() smp_mb() -#define smp_wmb() dmb(ishst) -#endif +#define __smp_mb() dmb(ish) +#define __smp_rmb() __smp_mb() +#define __smp_wmb() dmb(ishst) #include -- cgit v1.1 From 27f6cabc0ebf9e452c3251bf0511c41cf2c75dde Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: blackfin: define __smp_xxx This defines __smp_xxx barriers for blackfin, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/blackfin/include/asm/barrier.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/blackfin/include/asm/barrier.h b/arch/blackfin/include/asm/barrier.h index dfb66fe..7cca51c 100644 --- a/arch/blackfin/include/asm/barrier.h +++ b/arch/blackfin/include/asm/barrier.h @@ -78,8 +78,8 @@ #endif /* !CONFIG_SMP */ -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() #include -- cgit v1.1 From eebd1b927822f13429ec09d0a48fe92716b22840 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: ia64: define __smp_xxx This defines __smp_xxx barriers for ia64, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h This reduces the amount of arch-specific boiler-plate code. Signed-off-by: Michael S. Tsirkin Acked-by: Tony Luck Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/ia64/include/asm/barrier.h | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h index 2f93348..588f161 100644 --- a/arch/ia64/include/asm/barrier.h +++ b/arch/ia64/include/asm/barrier.h @@ -42,28 +42,24 @@ #define dma_rmb() mb() #define dma_wmb() mb() -#ifdef CONFIG_SMP -# define smp_mb() mb() -#else -# define smp_mb() barrier() -#endif +# define __smp_mb() mb() -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() /* * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no * need for asm trickery! 
*/ -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ -- cgit v1.1 From afc22de0c0ca8b4697a8aec2bbb35d4cc385e7e0 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: metag: define __smp_xxx This defines __smp_xxx barriers for metag, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: as __smp_XX macros should not depend on CONFIG_SMP, they can not use the existing fence() macro since that is defined differently between SMP and !SMP. For this reason, this patch introduces a wrapper metag_fence() that doesn't depend on CONFIG_SMP. fence() is then defined using that, depending on CONFIG_SMP. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: James Hogan Acked-by: Peter Zijlstra (Intel) --- arch/metag/include/asm/barrier.h | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/arch/metag/include/asm/barrier.h b/arch/metag/include/asm/barrier.h index b5b778b..5418517 100644 --- a/arch/metag/include/asm/barrier.h +++ b/arch/metag/include/asm/barrier.h @@ -44,13 +44,6 @@ static inline void wr_fence(void) #define rmb() barrier() #define wmb() mb() -#ifndef CONFIG_SMP -#define fence() do { } while (0) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#else - #ifdef CONFIG_METAG_SMP_WRITE_REORDERING /* * Write to the atomic memory unlock system event register (command 0). This is @@ -60,26 +53,31 @@ static inline void wr_fence(void) * incoherence). It is therefore ineffective if used after and on the same * thread as a write. */ -static inline void fence(void) +static inline void metag_fence(void) { volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK; barrier(); *flushptr = 0; barrier(); } -#define smp_mb() fence() -#define smp_rmb() fence() -#define smp_wmb() barrier() +#define __smp_mb() metag_fence() +#define __smp_rmb() metag_fence() +#define __smp_wmb() barrier() #else -#define fence() do { } while (0) -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() +#define metag_fence() do { } while (0) +#define __smp_mb() barrier() +#define __smp_rmb() barrier() +#define __smp_wmb() barrier() #endif + +#ifdef CONFIG_SMP +#define fence() metag_fence() +#else +#define fence() do { } while (0) #endif -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() #include -- cgit v1.1 From a60514bae72ee41b506b8702dfdd6eeeffe58556 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: mips: define __smp_xxx This defines __smp_xxx barriers for mips, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: the only exception is smp_mb__before_llsc which is mips-specific. We define both the __smp_mb__before_llsc variant (for use in asm/barriers.h) and smp_mb__before_llsc (for use elsewhere on this architecture). Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/mips/include/asm/barrier.h | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index 3eac4b9..d296633 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h @@ -85,20 +85,20 @@ #define wmb() fast_wmb() #define rmb() fast_rmb() -#if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP) +#if defined(CONFIG_WEAK_ORDERING) # ifdef CONFIG_CPU_CAVIUM_OCTEON -# define smp_mb() __sync() -# define smp_rmb() barrier() -# define smp_wmb() __syncw() +# define __smp_mb() __sync() +# define __smp_rmb() barrier() +# define __smp_wmb() __syncw() # else -# define smp_mb() __asm__ __volatile__("sync" : : :"memory") -# define smp_rmb() __asm__ __volatile__("sync" : : :"memory") -# define smp_wmb() __asm__ __volatile__("sync" : : :"memory") +# define __smp_mb() __asm__ __volatile__("sync" : : :"memory") +# define __smp_rmb() __asm__ __volatile__("sync" : : :"memory") +# define __smp_wmb() __asm__ __volatile__("sync" : : :"memory") # endif #else -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() +#define __smp_mb() barrier() +#define __smp_rmb() barrier() +#define __smp_wmb() barrier() #endif #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP) @@ -111,6 +111,7 @@ #ifdef CONFIG_CPU_CAVIUM_OCTEON #define smp_mb__before_llsc() smp_wmb() +#define __smp_mb__before_llsc() __smp_wmb() /* Cause previous writes to become visible on all CPUs as soon as possible */ #define nudge_writes() __asm__ __volatile__(".set push\n\t" \ ".set arch=octeon\n\t" \ @@ -118,11 +119,12 @@ ".set pop" : : : "memory") #else #define smp_mb__before_llsc() smp_llsc_mb() +#define __smp_mb__before_llsc() smp_llsc_mb() #define nudge_writes() mb() #endif -#define smp_mb__before_atomic() smp_mb__before_llsc() -#define smp_mb__after_atomic() smp_llsc_mb() +#define __smp_mb__before_atomic() __smp_mb__before_llsc() +#define __smp_mb__after_atomic() smp_llsc_mb() #include -- cgit v1.1 From 82b44496abd91781ed4120c3b0c1a3d111c3e28e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: s390: define __smp_xxx This defines __smp_xxx barriers for s390, for use by virtualization. Some smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: smp_mb, smp_rmb and smp_wmb are defined as full barriers unconditionally on this architecture. Signed-off-by: Michael S. 
Tsirkin Acked-by: Arnd Bergmann Acked-by: Martin Schwidefsky Acked-by: Peter Zijlstra (Intel) --- arch/s390/include/asm/barrier.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index c358c31..fbd25b2 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -26,18 +26,21 @@ #define wmb() barrier() #define dma_rmb() mb() #define dma_wmb() mb() -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() - -#define smp_store_release(p, v) \ +#define __smp_mb() mb() +#define __smp_rmb() rmb() +#define __smp_wmb() wmb() +#define smp_mb() __smp_mb() +#define smp_rmb() __smp_rmb() +#define smp_wmb() __smp_wmb() + +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ -- cgit v1.1 From 90a3ccb0be538a914e6a5c51ae919762261563ad Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: sh: define __smp_xxx, fix smp_store_mb for !SMP sh variant of smp_store_mb() calls xchg() on !SMP which is stronger than implied by both the name and the documentation. define __smp_store_mb instead: code in asm-generic/barrier.h will then define smp_store_mb correctly depending on CONFIG_SMP. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/sh/include/asm/barrier.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h index bf91037..f887c64 100644 --- a/arch/sh/include/asm/barrier.h +++ b/arch/sh/include/asm/barrier.h @@ -32,7 +32,8 @@ #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") #endif -#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) +#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) +#define smp_store_mb(var, value) __smp_store_mb(var, value) #include -- cgit v1.1 From 45d9b859411cb6d4dccc4e488336160acf9a6df5 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: sparc: define __smp_xxx This defines __smp_xxx barriers for sparc, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: David S. 
Miller Acked-by: Peter Zijlstra (Intel) --- arch/sparc/include/asm/barrier_64.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 26c3f72..c9f6ee6 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -37,14 +37,14 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define rmb() __asm__ __volatile__("":::"memory") #define wmb() __asm__ __volatile__("":::"memory") -#define smp_store_release(p, v) \ +#define __smp_store_release(p, v) \ do { \ compiletime_assert_atomic_type(*p); \ barrier(); \ WRITE_ONCE(*p, v); \ } while (0) -#define smp_load_acquire(p) \ +#define __smp_load_acquire(p) \ ({ \ typeof(*p) ___p1 = READ_ONCE(*p); \ compiletime_assert_atomic_type(*p); \ @@ -52,8 +52,8 @@ do { \ ___p1; \ }) -#define smp_mb__before_atomic() barrier() -#define smp_mb__after_atomic() barrier() +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() #include -- cgit v1.1 From d39886a7d31e0182f8b7e3b0e953e636b0af96fd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: tile: define __smp_xxx This defines __smp_xxx barriers for tile, for use by virtualization. Some smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Note: for 32 bit, keep smp_mb__after_atomic around since it's faster than the generic implementation. Signed-off-by: Michael S. Tsirkin Acked-by: Arnd Bergmann Acked-by: Peter Zijlstra (Intel) --- arch/tile/include/asm/barrier.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h index 96a42ae..d552228 100644 --- a/arch/tile/include/asm/barrier.h +++ b/arch/tile/include/asm/barrier.h @@ -79,11 +79,12 @@ mb_incoherent(void) * But after the word is updated, the routine issues an "mf" before returning, * and since it's a function call, we don't even need a compiler barrier. */ -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() do { } while (0) +#define __smp_mb__before_atomic() __smp_mb() +#define __smp_mb__after_atomic() do { } while (0) +#define smp_mb__after_atomic() __smp_mb__after_atomic() #else /* 64 bit */ -#define smp_mb__before_atomic() smp_mb() -#define smp_mb__after_atomic() smp_mb() +#define __smp_mb__before_atomic() __smp_mb() +#define __smp_mb__after_atomic() __smp_mb() #endif #include -- cgit v1.1 From 1ce790913b6ce67cd85235083aeb67898bad1a1f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 15:04:42 +0200 Subject: xtensa: define __smp_xxx This defines __smp_xxx barriers for xtensa, for use by virtualization. smp_xxx barriers are removed as they are defined correctly by asm-generic/barriers.h Signed-off-by: Michael S. 
Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
---
 arch/xtensa/include/asm/barrier.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 5b88774..956596e 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -13,8 +13,8 @@
 #define rmb() barrier()
 #define wmb() mb()
 
-#define smp_mb__before_atomic()		barrier()
-#define smp_mb__after_atomic()		barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 #include <asm-generic/barrier.h>
-- cgit v1.1


From 1638fb72070f8faf2ac0787fafbb839d0c859d5b Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Sun, 27 Dec 2015 15:04:42 +0200
Subject: x86: define __smp_xxx

This defines __smp_xxx barriers for x86, for use by virtualization.

smp_xxx barriers are removed as they are defined correctly by
asm-generic/barrier.h.

Signed-off-by: Michael S. Tsirkin
Acked-by: Arnd Bergmann
Acked-by: Peter Zijlstra (Intel)
Reviewed-by: Thomas Gleixner
---
 arch/x86/include/asm/barrier.h | 31 ++++++++++++-------------------
 1 file changed, 12 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index cc4c2a7..a584e1c 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -31,17 +31,10 @@
 #endif
 #define dma_wmb()	barrier()
 
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#define smp_rmb()	dma_rmb()
-#define smp_wmb()	barrier()
-#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else /* !SMP */
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
-#endif /* SMP */
+#define __smp_mb()	mb()
+#define __smp_rmb()	dma_rmb()
+#define __smp_wmb()	barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #if defined(CONFIG_X86_PPRO_FENCE)
 
 /*
@@ -50,31 +43,31 @@
  * model and we should fall back to full barriers.
  */
 
-#define smp_store_release(p, v)				\
+#define __smp_store_release(p, v)			\
 do {							\
 	compiletime_assert_atomic_type(*p);		\
-	smp_mb();					\
+	__smp_mb();					\
 	WRITE_ONCE(*p, v);				\
 } while (0)
 
-#define smp_load_acquire(p)				\
+#define __smp_load_acquire(p)				\
 ({							\
 	typeof(*p) ___p1 = READ_ONCE(*p);		\
 	compiletime_assert_atomic_type(*p);		\
-	smp_mb();					\
+	__smp_mb();					\
 	___p1;						\
 })
 
 #else /* regular x86 TSO memory ordering */
 
-#define smp_store_release(p, v)				\
+#define __smp_store_release(p, v)			\
 do {							\
 	compiletime_assert_atomic_type(*p);		\
 	barrier();					\
 	WRITE_ONCE(*p, v);				\
 } while (0)
 
-#define smp_load_acquire(p)				\
+#define __smp_load_acquire(p)				\
 ({							\
 	typeof(*p) ___p1 = READ_ONCE(*p);		\
 	compiletime_assert_atomic_type(*p);		\
	barrier();					\
	___p1;						\
})
 
@@ -85,8 +78,8 @@ do {							\
 #endif
 
 /* Atomic operations are already serializing on x86 */
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 #include <asm-generic/barrier.h>
-- cgit v1.1


From 6a65d26385bf487926a0616650927303058551e3 Mon Sep 17 00:00:00 2001
From: "Michael S. Tsirkin"
Date: Sun, 27 Dec 2015 18:23:01 +0200
Subject: asm-generic: implement virt_xxx memory barriers

Guests running within virtual machines might be affected by SMP effects
even if the guest itself is compiled without SMP support. This is an
artifact of interfacing with an SMP host while running an UP kernel.
Using mandatory barriers for this use-case would be possible but is often
suboptimal.
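
(Concretely, an illustrative sketch with hypothetical ring variables, not
from the patch: a UP guest publishing a descriptor to a possibly-SMP host
wants

	desc->addr = buf_phys;
	desc->len  = len;
	virt_wmb();	/* expands to __smp_wmb() even on !CONFIG_SMP */
	WRITE_ONCE(avail->idx, avail->idx + 1);

where smp_wmb() would degrade to a pure compiler barrier in a !CONFIG_SMP
guest, and wmb() would be a needlessly heavy mandatory barrier.)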
In particular, virtio uses a bunch of confusing ifdefs to work around this, while xen just uses the mandatory barriers. To better handle this case, low-level virt_mb() etc macros are made available. These are implemented trivially using the low-level __smp_xxx macros, the purpose of these wrappers is to annotate those specific cases. These have the same effect as smp_mb() etc when SMP is enabled, but generate identical code for SMP and non-SMP systems. For example, virtual machine guests should use virt_mb() rather than smp_mb() when synchronizing against a (possibly SMP) host. Suggested-by: David Miller Signed-off-by: Michael S. Tsirkin Acked-by: Peter Zijlstra (Intel) --- Documentation/memory-barriers.txt | 28 +++++++++++++++++++++++----- include/asm-generic/barrier.h | 11 +++++++++++ 2 files changed, 34 insertions(+), 5 deletions(-) diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index aef9487..8f4a93a 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt @@ -1655,17 +1655,18 @@ macro is a good place to start looking. SMP memory barriers are reduced to compiler barriers on uniprocessor compiled systems because it is assumed that a CPU will appear to be self-consistent, and will order overlapping accesses correctly with respect to itself. +However, see the subsection on "Virtual Machine Guests" below. [!] Note that SMP memory barriers _must_ be used to control the ordering of references to shared memory on SMP systems, though the use of locking instead is sufficient. Mandatory barriers should not be used to control SMP effects, since mandatory -barriers unnecessarily impose overhead on UP systems. They may, however, be -used to control MMIO effects on accesses through relaxed memory I/O windows. -These are required even on non-SMP systems as they affect the order in which -memory operations appear to a device by prohibiting both the compiler and the -CPU from reordering them. +barriers impose unnecessary overhead on both SMP and UP systems. They may, +however, be used to control MMIO effects on accesses through relaxed memory I/O +windows. These barriers are required even on non-SMP systems as they affect +the order in which memory operations appear to a device by prohibiting both the +compiler and the CPU from reordering them. There are some more advanced barrier functions: @@ -2948,6 +2949,23 @@ The Alpha defines the Linux kernel's memory barrier model. See the subsection on "Cache Coherency" above. +VIRTUAL MACHINE GUESTS +------------------- + +Guests running within virtual machines might be affected by SMP effects even if +the guest itself is compiled without SMP support. This is an artifact of +interfacing with an SMP host while running an UP kernel. Using mandatory +barriers for this use-case would be possible but is often suboptimal. + +To handle this case optimally, low-level virt_mb() etc macros are available. +These have the same effect as smp_mb() etc when SMP is enabled, but generate +identical code for SMP and non-SMP systems. For example, virtual machine guests +should use virt_mb() rather than smp_mb() when synchronizing against a +(possibly SMP) host. + +These are equivalent to smp_mb() etc counterparts in all other respects, +in particular, they do not control MMIO effects: to control +MMIO effects, use mandatory barriers. 
============ EXAMPLE USES diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h index 8752964..1cceca14 100644 --- a/include/asm-generic/barrier.h +++ b/include/asm-generic/barrier.h @@ -196,5 +196,16 @@ do { \ #endif +/* Barriers for virtual machine guests when talking to an SMP host */ +#define virt_mb() __smp_mb() +#define virt_rmb() __smp_rmb() +#define virt_wmb() __smp_wmb() +#define virt_read_barrier_depends() __smp_read_barrier_depends() +#define virt_store_mb(var, value) __smp_store_mb(var, value) +#define virt_mb__before_atomic() __smp_mb__before_atomic() +#define virt_mb__after_atomic() __smp_mb__after_atomic() +#define virt_store_release(p, v) __smp_store_release(p, v) +#define virt_load_acquire(p) __smp_load_acquire(p) + #endif /* !__ASSEMBLY__ */ #endif /* __ASM_GENERIC_BARRIER_H */ -- cgit v1.1 From d307fb16f788823b29aab9aa7e4821ac8a124b19 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 20 Dec 2015 13:52:10 +0200 Subject: Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb" This reverts commit 9e1a27ea42691429e31f158cce6fc61bc79bb2e9. While that commit optimizes !CONFIG_SMP, it mixes up DMA and SMP concepts, making the code hard to figure out. A better way to optimize this is with the new __smp_XXX barriers. As a first step, go back to full rmb/wmb barriers for !SMP. We switch to __smp_XXX barriers in the next patch. Cc: Peter Zijlstra Cc: Alexander Duyck Signed-off-by: Michael S. Tsirkin Acked-by: Peter Zijlstra (Intel) --- include/linux/virtio_ring.h | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 8e50888..67e06fe 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -21,20 +21,19 @@ * actually quite cheap. */ +#ifdef CONFIG_SMP static inline void virtio_mb(bool weak_barriers) { -#ifdef CONFIG_SMP if (weak_barriers) smp_mb(); else -#endif mb(); } static inline void virtio_rmb(bool weak_barriers) { if (weak_barriers) - dma_rmb(); + smp_rmb(); else rmb(); } @@ -42,10 +41,26 @@ static inline void virtio_rmb(bool weak_barriers) static inline void virtio_wmb(bool weak_barriers) { if (weak_barriers) - dma_wmb(); + smp_wmb(); else wmb(); } +#else +static inline void virtio_mb(bool weak_barriers) +{ + mb(); +} + +static inline void virtio_rmb(bool weak_barriers) +{ + rmb(); +} + +static inline void virtio_wmb(bool weak_barriers) +{ + wmb(); +} +#endif struct virtio_device; struct virtqueue; -- cgit v1.1 From a65961272e1ebdb60804bbe2bb440481fcbd1c76 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 17:55:35 +0200 Subject: virtio_ring: update weak barriers to use virt_xxx virtio ring uses smp_wmb on SMP and wmb on !SMP, the reason for the later being that it might be talking to another kernel on the same SMP machine. This is exactly what virt_xxx barriers do, so switch to these instead of homegrown ifdef hacks. Cc: Peter Zijlstra Cc: Alexander Duyck Signed-off-by: Michael S. Tsirkin Acked-by: Peter Zijlstra (Intel) --- include/linux/virtio_ring.h | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 67e06fe..f3fa55b 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -12,7 +12,7 @@ * anyone care? 
* * For virtio_pci on SMP, we don't need to order with respect to MMIO - * accesses through relaxed memory I/O windows, so smp_mb() et al are + * accesses through relaxed memory I/O windows, so virt_mb() et al are * sufficient. * * For using virtio to talk to real devices (eg. other heterogeneous @@ -21,11 +21,10 @@ * actually quite cheap. */ -#ifdef CONFIG_SMP static inline void virtio_mb(bool weak_barriers) { if (weak_barriers) - smp_mb(); + virt_mb(); else mb(); } @@ -33,7 +32,7 @@ static inline void virtio_mb(bool weak_barriers) static inline void virtio_rmb(bool weak_barriers) { if (weak_barriers) - smp_rmb(); + virt_rmb(); else rmb(); } @@ -41,26 +40,10 @@ static inline void virtio_rmb(bool weak_barriers) static inline void virtio_wmb(bool weak_barriers) { if (weak_barriers) - smp_wmb(); + virt_wmb(); else wmb(); } -#else -static inline void virtio_mb(bool weak_barriers) -{ - mb(); -} - -static inline void virtio_rmb(bool weak_barriers) -{ - rmb(); -} - -static inline void virtio_wmb(bool weak_barriers) -{ - wmb(); -} -#endif struct virtio_device; struct virtqueue; -- cgit v1.1 From 3226aad81aa670015a59e51458a0deb2d3bcb600 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 7 Jan 2016 17:54:54 +0200 Subject: sh: support 1 and 2 byte xchg This completes the xchg implementation for sh architecture. Note: The llsc variant is tricky since this only supports 4 byte atomics, the existing implementation of 1 byte xchg is wrong: we need to do a 4 byte cmpxchg and retry if any bytes changed meanwhile. Write this in C for clarity. Suggested-by: Rich Felker Signed-off-by: Michael S. Tsirkin Acked-by: Peter Zijlstra (Intel) --- arch/sh/include/asm/cmpxchg-grb.h | 22 +++++++++++++++ arch/sh/include/asm/cmpxchg-irq.h | 11 ++++++++ arch/sh/include/asm/cmpxchg-llsc.h | 58 +++++++++++++++++++++++--------------- arch/sh/include/asm/cmpxchg.h | 3 ++ 4 files changed, 72 insertions(+), 22 deletions(-) diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h index f848dec..2ed557b 100644 --- a/arch/sh/include/asm/cmpxchg-grb.h +++ b/arch/sh/include/asm/cmpxchg-grb.h @@ -23,6 +23,28 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) return retval; } +static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) +{ + unsigned long retval; + + __asm__ __volatile__ ( + " .align 2 \n\t" + " mova 1f, r0 \n\t" /* r0 = end point */ + " mov r15, r1 \n\t" /* r1 = saved sp */ + " mov #-6, r15 \n\t" /* LOGIN */ + " mov.w @%1, %0 \n\t" /* load old value */ + " extu.w %0, %0 \n\t" /* extend as unsigned */ + " mov.w %2, @%1 \n\t" /* store new value */ + "1: mov r1, r15 \n\t" /* LOGOUT */ + : "=&r" (retval), + "+r" (m), + "+r" (val) /* inhibit r15 overloading */ + : + : "memory" , "r0", "r1"); + + return retval; +} + static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) { unsigned long retval; diff --git a/arch/sh/include/asm/cmpxchg-irq.h b/arch/sh/include/asm/cmpxchg-irq.h index bd11f63..f888772 100644 --- a/arch/sh/include/asm/cmpxchg-irq.h +++ b/arch/sh/include/asm/cmpxchg-irq.h @@ -14,6 +14,17 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) return retval; } +static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) +{ + unsigned long flags, retval; + + local_irq_save(flags); + retval = *m; + *m = val; + local_irq_restore(flags); + return retval; +} + static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) { unsigned long flags, retval; diff --git 
a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h index 4713666..e754794 100644 --- a/arch/sh/include/asm/cmpxchg-llsc.h +++ b/arch/sh/include/asm/cmpxchg-llsc.h @@ -1,6 +1,9 @@ #ifndef __ASM_SH_CMPXCHG_LLSC_H #define __ASM_SH_CMPXCHG_LLSC_H +#include <linux/bitops.h> +#include <asm/byteorder.h> + static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) { unsigned long retval; @@ -22,29 +25,8 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) return retval; } -static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) -{ - unsigned long retval; - unsigned long tmp; - - __asm__ __volatile__ ( - "1: \n\t" - "movli.l @%2, %0 ! xchg_u8 \n\t" - "mov %0, %1 \n\t" - "mov %3, %0 \n\t" - "movco.l %0, @%2 \n\t" - "bf 1b \n\t" - "synco \n\t" - : "=&z"(tmp), "=&r" (retval) - : "r" (m), "r" (val & 0xff) - : "t", "memory" - ); - - return retval; -} - static inline unsigned long -__cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) +__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new) { unsigned long retval; unsigned long tmp; @@ -68,4 +50,36 @@ __cmpxchg_u32(volatile int *m, unsigned long old, unsigned long new) return retval; } +static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size) +{ + int off = (unsigned long)ptr % sizeof(u32); + volatile u32 *p = ptr - off; +#ifdef __BIG_ENDIAN + int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE; +#else + int bitoff = off * BITS_PER_BYTE; +#endif + u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; + u32 oldv, newv; + u32 ret; + + do { + oldv = READ_ONCE(*p); + ret = (oldv & bitmask) >> bitoff; + newv = (oldv & ~bitmask) | (x << bitoff); + } while (__cmpxchg_u32(p, oldv, newv) != oldv); + + return ret; +} + +static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) +{ + return __xchg_cmpxchg(m, val, sizeof *m); +} + +static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) +{ + return __xchg_cmpxchg(m, val, sizeof *m); +} + #endif /* __ASM_SH_CMPXCHG_LLSC_H */ diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h index 85c97b18..5225916 100644 --- a/arch/sh/include/asm/cmpxchg.h +++ b/arch/sh/include/asm/cmpxchg.h @@ -27,6 +27,9 @@ extern void __xchg_called_with_bad_pointer(void); case 4: \ __xchg__res = xchg_u32(__xchg_ptr, x); \ break; \ + case 2: \ + __xchg__res = xchg_u16(__xchg_ptr, x); \ + break; \ case 1: \ __xchg__res = xchg_u8(__xchg_ptr, x); \ break; \ -- cgit v1.1 From 9e3f84ce416663c84a191cb3ead300fc1a4adadc Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 8 Jan 2016 09:23:58 +0200 Subject: sh: move xchg_cmpxchg to a header by itself Looks like future sh variants will support a 4-byte cas which will be used to implement 1 and 2 byte xchg. This is exactly what we do for llsc now; move the portable part of the code into a separate header so it's easy to reuse. Suggested-by: Rich Felker Signed-off-by: Michael S.
Tsirkin Acked-by: Peter Zijlstra (Intel) --- arch/sh/include/asm/cmpxchg-llsc.h | 35 +------------------------- arch/sh/include/asm/cmpxchg-xchg.h | 51 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 34 deletions(-) create mode 100644 arch/sh/include/asm/cmpxchg-xchg.h diff --git a/arch/sh/include/asm/cmpxchg-llsc.h b/arch/sh/include/asm/cmpxchg-llsc.h index e754794..fcfd322 100644 --- a/arch/sh/include/asm/cmpxchg-llsc.h +++ b/arch/sh/include/asm/cmpxchg-llsc.h @@ -1,9 +1,6 @@ #ifndef __ASM_SH_CMPXCHG_LLSC_H #define __ASM_SH_CMPXCHG_LLSC_H -#include <linux/bitops.h> -#include <asm/byteorder.h> - static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val) { unsigned long retval; @@ -50,36 +47,6 @@ __cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new) return retval; } -static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size) -{ - int off = (unsigned long)ptr % sizeof(u32); - volatile u32 *p = ptr - off; -#ifdef __BIG_ENDIAN - int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE; -#else - int bitoff = off * BITS_PER_BYTE; -#endif - u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; - u32 oldv, newv; - u32 ret; - - do { - oldv = READ_ONCE(*p); - ret = (oldv & bitmask) >> bitoff; - newv = (oldv & ~bitmask) | (x << bitoff); - } while (__cmpxchg_u32(p, oldv, newv) != oldv); - - return ret; -} - -static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) -{ - return __xchg_cmpxchg(m, val, sizeof *m); -} - -static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) -{ - return __xchg_cmpxchg(m, val, sizeof *m); -} +#include <asm/cmpxchg-xchg.h> #endif /* __ASM_SH_CMPXCHG_LLSC_H */ diff --git a/arch/sh/include/asm/cmpxchg-xchg.h b/arch/sh/include/asm/cmpxchg-xchg.h new file mode 100644 index 0000000..7219719 --- /dev/null +++ b/arch/sh/include/asm/cmpxchg-xchg.h @@ -0,0 +1,51 @@ +#ifndef __ASM_SH_CMPXCHG_XCHG_H +#define __ASM_SH_CMPXCHG_XCHG_H + +/* + * Copyright (C) 2016 Red Hat, Inc. + * Author: Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2. See the + * file "COPYING" in the main directory of this archive for more details. + */ +#include <linux/bitops.h> +#include <asm/byteorder.h> + +/* + * Portable implementations of 1 and 2 byte xchg using a 4 byte cmpxchg. + * Note: this header isn't self-contained: before including it, __cmpxchg_u32 + * must be defined first. + */ +static inline u32 __xchg_cmpxchg(volatile void *ptr, u32 x, int size) +{ + int off = (unsigned long)ptr % sizeof(u32); + volatile u32 *p = ptr - off; +#ifdef __BIG_ENDIAN + int bitoff = (sizeof(u32) - 1 - off) * BITS_PER_BYTE; +#else + int bitoff = off * BITS_PER_BYTE; +#endif + u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff; + u32 oldv, newv; + u32 ret; + + do { + oldv = READ_ONCE(*p); + ret = (oldv & bitmask) >> bitoff; + newv = (oldv & ~bitmask) | (x << bitoff); + } while (__cmpxchg_u32(p, oldv, newv) != oldv); + + return ret; +} + +static inline unsigned long xchg_u16(volatile u16 *m, unsigned long val) +{ + return __xchg_cmpxchg(m, val, sizeof *m); +} + +static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val) +{ + return __xchg_cmpxchg(m, val, sizeof *m); +} + +#endif /* __ASM_SH_CMPXCHG_XCHG_H */ -- cgit v1.1 From 788e5b3a5da24cc8d93ce2f7c6508181cd7d7fb6 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 17 Dec 2015 12:20:39 +0200 Subject: virtio_ring: use virt_store_mb We need a full barrier after writing out the event index; using virt_store_mb there seems better than open-coding.
As usual, we need a wrapper to account for strong barriers. It's tempting to use this in vhost as well; for that, we'll need a variant of smp_store_mb that works on __user pointers. Signed-off-by: Michael S. Tsirkin Acked-by: Peter Zijlstra (Intel) --- drivers/virtio/virtio_ring.c | 15 +++++++++------ include/linux/virtio_ring.h | 11 +++++++++++ 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index ee663c4..e12e385 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -517,10 +517,10 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) /* If we expect an interrupt for the next entry, tell host * by writing event index and flush out the write before * the read in the next get_buf call. */ - if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { - vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); - virtio_mb(vq->weak_barriers); - } + if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) + virtio_store_mb(vq->weak_barriers, + &vring_used_event(&vq->vring), + cpu_to_virtio16(_vq->vdev, vq->last_used_idx)); #ifdef DEBUG vq->last_add_time_valid = false; @@ -653,8 +653,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) } /* TODO: tune this threshold */ bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; - vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs); - virtio_mb(vq->weak_barriers); + + virtio_store_mb(vq->weak_barriers, + &vring_used_event(&vq->vring), + cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs)); + if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { END_USE(vq); return false; } diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index f3fa55b..a156e2b 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h @@ -45,6 +45,17 @@ static inline void virtio_wmb(bool weak_barriers) wmb(); } +static inline void virtio_store_mb(bool weak_barriers, + __virtio16 *p, __virtio16 v) +{ + if (weak_barriers) { + virt_store_mb(*p, v); + } else { + WRITE_ONCE(*p, v); + mb(); + } +} + struct virtio_device; struct virtqueue; -- cgit v1.1 From 5bb0c9be2ac9a98513d43969e2b5c3d02cb271c8 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 18:02:16 +0200 Subject: xenbus: use virt_xxx barriers drivers/xen/xenbus/xenbus_comms.c uses full memory barriers to communicate with the other side. For guests compiled with CONFIG_SMP, smp_wmb and smp_mb would be sufficient, so mb() and wmb() here are only needed if a non-SMP guest runs on an SMP host. Switch to virt_xxx barriers which serve this exact purpose. Signed-off-by: Michael S. Tsirkin Acked-by: David Vrabel Acked-by: Peter Zijlstra (Intel) --- drivers/xen/xenbus/xenbus_comms.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c index fdb0f33..ecdecce 100644 --- a/drivers/xen/xenbus/xenbus_comms.c +++ b/drivers/xen/xenbus/xenbus_comms.c @@ -123,14 +123,14 @@ int xb_write(const void *data, unsigned len) avail = len; /* Must write data /after/ reading the consumer index. */ - mb(); + virt_mb(); memcpy(dst, data, avail); data += avail; len -= avail; /* Other side must not see new producer until data is there. */ - wmb(); + virt_wmb(); intf->req_prod += avail; /* Implies mb(): other side will see the updated producer.
*/ @@ -180,14 +180,14 @@ int xb_read(void *data, unsigned len) avail = len; /* Must read data /after/ reading the producer index. */ - rmb(); + virt_rmb(); memcpy(data, src, avail); data += avail; len -= avail; /* Other side must not see free space until we've copied out */ - mb(); + virt_mb(); intf->rsp_cons += avail; pr_debug("Finished read of %i bytes (%i to go)\n", avail, len); -- cgit v1.1 From 506b02eb1cf79838b76c6e3e43171cf960f01fc8 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 18:02:16 +0200 Subject: xen/io: use virt_xxx barriers include/xen/interface/io/ring.h uses full memory barriers to communicate with the other side. For guests compiled with CONFIG_SMP, smp_wmb and smp_mb would be sufficient, so mb() and wmb() here are only needed if a non-SMP guest runs on an SMP host. Switch to virt_xxx barriers which serve this exact purpose. Signed-off-by: Michael S. Tsirkin Acked-by: David Vrabel Acked-by: Peter Zijlstra (Intel) --- include/xen/interface/io/ring.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h index 7dc685b..21f4fbd 100644 --- a/include/xen/interface/io/ring.h +++ b/include/xen/interface/io/ring.h @@ -208,12 +208,12 @@ struct __name##_back_ring { \ #define RING_PUSH_REQUESTS(_r) do { \ - wmb(); /* back sees requests /before/ updated producer index */ \ + virt_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = (_r)->req_prod_pvt; \ } while (0) #define RING_PUSH_RESPONSES(_r) do { \ - wmb(); /* front sees responses /before/ updated producer index */ \ + virt_wmb(); /* front sees responses /before/ updated producer index */ \ (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt; \ } while (0) @@ -250,9 +250,9 @@ struct __name##_back_ring { \ #define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->req_prod; \ RING_IDX __new = (_r)->req_prod_pvt; \ - wmb(); /* back sees requests /before/ updated producer index */ \ + virt_wmb(); /* back sees requests /before/ updated producer index */ \ (_r)->sring->req_prod = __new; \ - mb(); /* back sees new requests /before/ we check req_event */ \ + virt_mb(); /* back sees new requests /before/ we check req_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) < \ (RING_IDX)(__new - __old)); \ } while (0) @@ -260,9 +260,9 @@ struct __name##_back_ring { \ #define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do { \ RING_IDX __old = (_r)->sring->rsp_prod; \ RING_IDX __new = (_r)->rsp_prod_pvt; \ - wmb(); /* front sees responses /before/ updated producer index */ \ + virt_wmb(); /* front sees responses /before/ updated producer index */ \ (_r)->sring->rsp_prod = __new; \ - mb(); /* front sees new responses /before/ we check rsp_event */ \ + virt_mb(); /* front sees new responses /before/ we check rsp_event */ \ (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) < \ (RING_IDX)(__new - __old)); \ } while (0) @@ -271,7 +271,7 @@ struct __name##_back_ring { \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ if (_work_to_do) break; \ (_r)->sring->req_event = (_r)->req_cons + 1; \ - mb(); \ + virt_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r); \ } while (0) @@ -279,7 +279,7 @@ struct __name##_back_ring { \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ if (_work_to_do) break; \ (_r)->sring->rsp_event = (_r)->rsp_cons + 1; \ - mb(); \ + virt_mb(); \ (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \ } while (0) 
-- cgit v1.1 From 234927540e6ac82f5f083a11c3e860e346ed09bc Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 27 Dec 2015 18:02:16 +0200 Subject: xen/events: use virt_xxx barriers drivers/xen/events/events_fifo.c uses rmb() to communicate with the other side. For guests compiled with CONFIG_SMP, smp_rmb would be sufficient, so rmb() here is only needed if a non-SMP guest runs on an SMP host. Switch to the virt_rmb barrier which serves this exact purpose. Pull in asm/barrier.h here to make sure the file is self-contained. Suggested-by: David Vrabel Signed-off-by: Michael S. Tsirkin Acked-by: David Vrabel Acked-by: Peter Zijlstra (Intel) --- drivers/xen/events/events_fifo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index 96a1b8d..eff2b88 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c @@ -41,6 +41,7 @@ #include #include +#include <asm/barrier.h> #include #include #include @@ -296,7 +297,7 @@ static void consume_one_event(unsigned cpu, * control block. */ if (head == 0) { - rmb(); /* Ensure word is up-to-date before reading head. */ + virt_rmb(); /* Ensure word is up-to-date before reading head. */ head = control_block->head[priority]; } -- cgit v1.1 From a677f4869576eb177570ffee68598d2202de030f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 6 Jan 2016 09:13:14 +0200 Subject: s390: use generic memory barriers The s390 kernel is SMP to 99.99%; we just didn't bother with a non-SMP variant of the memory barriers. If the generic header is used, we get the non-SMP version for free. It will save a small amount of text space for CONFIG_SMP=n. Suggested-by: Martin Schwidefsky Signed-off-by: Michael S. Tsirkin Acked-by: Peter Zijlstra (Intel) --- arch/s390/include/asm/barrier.h | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index fbd25b2..4d26fa4 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -29,9 +29,6 @@ #define __smp_mb() mb() #define __smp_rmb() rmb() #define __smp_wmb() wmb() -#define smp_mb() __smp_mb() -#define smp_rmb() __smp_rmb() -#define smp_wmb() __smp_wmb() #define __smp_store_release(p, v) \ do { \ -- cgit v1.1 From 779a6a36961b50cd154da5705d9e6508f819cc4e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 10 Jan 2016 13:19:38 +0200 Subject: s390: more efficient smp barriers As per: lkml.kernel.org/r/20150921112252.3c2937e1@mschwide atomics imply a barrier on s390, so s390 should change smp_mb__before_atomic and smp_mb__after_atomic to barrier() instead of smp_mb(), and hence should not use the generic versions. Suggested-by: Peter Zijlstra Suggested-by: Martin Schwidefsky Signed-off-by: Michael S.
Tsirkin Acked-by: Martin Schwidefsky Acked-by: Peter Zijlstra (Intel) --- arch/s390/include/asm/barrier.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h index 4d26fa4..5c8db3c 100644 --- a/arch/s390/include/asm/barrier.h +++ b/arch/s390/include/asm/barrier.h @@ -45,6 +45,9 @@ do { \ ___p1; \ }) +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + #include <asm-generic/barrier.h> #endif /* __ASM_BARRIER_H */ -- cgit v1.1 From f68b992bbb474641881932c61c92dcfa6f5b3689 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 28 Dec 2015 08:35:12 +0900 Subject: virtio_balloon: fix race by fill and leak During my compaction-related work, I encountered a bug with ballooning. With repeated inflate/deflate cycles, guest memory (i.e., MemTotal in /proc/meminfo) decreases and cannot be recovered. The reason is that balloon_lock doesn't cover release_pages_balloon, so struct virtio_balloon fields can be overwritten by a racing fill_balloon (e.g., vb->*pfns is critical). This patch fixes it in my testing. Cc: Signed-off-by: Minchan Kim Signed-off-by: Michael S. Tsirkin --- drivers/virtio/virtio_balloon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 7efc329..7d3e5d0 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) */ if (vb->num_pfns != 0) tell_host(vb, vb->deflate_vq); - mutex_unlock(&vb->balloon_lock); release_pages_balloon(vb); + mutex_unlock(&vb->balloon_lock); return num_freed_pages; } -- cgit v1.1 From 21ea9fb69e7c4b1b1559c3e410943d3ff248ffcb Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Mon, 28 Dec 2015 08:35:13 +0900 Subject: virtio_balloon: fix race between migration and ballooning In balloon_page_dequeue, pages_lock should cover the whole loop (i.e., list_for_each_entry_safe). Otherwise, the cursor page could be isolated by compaction, and the list_del done by isolation could then poison page->lru.{prev,next}, so the loop could end up accessing a wrong address, like this. This patch fixes the bug. general protection fault: 0000 [#1] SMP Dumping ftrace buffer: (ftrace buffer empty) Modules linked in: CPU: 2 PID: 82 Comm: vballoon Not tainted 4.4.0-rc5-mm1-access_bit+ #1906 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 task: ffff8800a7ff0000 ti: ffff8800a7fec000 task.ti: ffff8800a7fec000 RIP: 0010:[] [] balloon_page_dequeue+0x54/0x130 RSP: 0018:ffff8800a7fefdc0 EFLAGS: 00010246 RAX: ffff88013fff9a70 RBX: ffffea000056fe00 RCX: 0000000000002b7d RDX: ffff88013fff9a70 RSI: ffffea000056fe00 RDI: ffff88013fff9a68 RBP: ffff8800a7fefde8 R08: ffffea000056fda0 R09: 0000000000000000 R10: ffff8800a7fefd90 R11: 0000000000000001 R12: dead0000000000e0 R13: ffffea000056fe20 R14: ffff880138809070 R15: ffff880138809060 FS: 0000000000000000(0000) GS:ffff88013fc40000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00007f229c10e000 CR3: 00000000b8b53000 CR4: 00000000000006a0 Stack: 0000000000000100 ffff880138809088 ffff880138809000 ffff880138809060 0000000000000046 ffff8800a7fefe28 ffffffff812c86d3 ffff880138809020 ffff880138809000 fffffffffff91900 0000000000000100 ffff880138809060 Call Trace: [] leak_balloon+0x93/0x1a0 [] balloon+0x217/0x2a0 [] ? __schedule+0x31e/0x8b0 [] ? abort_exclusive_wait+0xb0/0xb0 [] ?
update_balloon_stats+0xf0/0xf0 [] kthread+0xc9/0xe0 [] ? kthread_park+0x60/0x60 [] ret_from_fork+0x3f/0x70 [] ? kthread_park+0x60/0x60 Code: 8d 60 e0 0f 84 af 00 00 00 48 8b 43 20 a8 01 75 3b 48 89 d8 f0 0f ba 28 00 72 10 48 8b 03 f6 c4 08 75 2f 48 89 df e8 8c 83 f9 ff <49> 8b 44 24 20 4d 8d 6c 24 20 48 83 e8 20 4d 39 f5 74 7a 4c 89 RIP [] balloon_page_dequeue+0x54/0x130 RSP ---[ end trace 43cf28060d708d5f ]--- Kernel panic - not syncing: Fatal exception Dumping ftrace buffer: (ftrace buffer empty) Kernel Offset: disabled Cc: Signed-off-by: Minchan Kim Signed-off-by: Michael S. Tsirkin Acked-by: Rafael Aquini --- mm/balloon_compaction.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index d3116be..300117f 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) bool dequeued_page; dequeued_page = false; + spin_lock_irqsave(&b_dev_info->pages_lock, flags); list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) { /* * Block others from accessing the 'page' while we get around @@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) continue; } #endif - spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_delete(page); __count_vm_event(BALLOON_DEFLATE); - spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); dequeued_page = true; break; } } + spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); if (!dequeued_page) { /* -- cgit v1.1 From f7ad26ff952b3ca2702d7da03aad0ab1f6c01d7c Mon Sep 17 00:00:00 2001 From: Stefan Hajnoczi Date: Thu, 17 Dec 2015 16:53:43 +0800 Subject: virtio: make find_vqs() checkpatch.pl-friendly checkpatch.pl wants arrays of strings declared as follows: static const char * const names[] = { "vq-1", "vq-2", "vq-3" }; Currently the find_vqs() function takes a const char *names[] argument so passing checkpatch.pl's const char * const names[] results in a compiler error due to losing the second const. This patch adjusts the find_vqs() prototype and updates all virtio transports. This makes it possible for virtio_balloon.c, virtio_input.c, virtgpu_kms.c, and virtio_rpmsg_bus.c to use the checkpatch.pl-friendly type. Signed-off-by: Stefan Hajnoczi Signed-off-by: Michael S. 
Tsirkin Acked-by: Bjorn Andersson --- drivers/gpu/drm/virtio/virtgpu_kms.c | 2 +- drivers/misc/mic/card/mic_virtio.c | 2 +- drivers/remoteproc/remoteproc_virtio.c | 2 +- drivers/rpmsg/virtio_rpmsg_bus.c | 2 +- drivers/s390/virtio/kvm_virtio.c | 2 +- drivers/s390/virtio/virtio_ccw.c | 2 +- drivers/virtio/virtio_balloon.c | 2 +- drivers/virtio/virtio_input.c | 2 +- drivers/virtio/virtio_mmio.c | 2 +- drivers/virtio/virtio_pci_common.c | 4 ++-- drivers/virtio/virtio_pci_common.h | 2 +- drivers/virtio/virtio_pci_modern.c | 2 +- include/linux/virtio_config.h | 2 +- 13 files changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 06496a1..4150873 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -130,7 +130,7 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags) static vq_callback_t *callbacks[] = { virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack }; - static const char *names[] = { "control", "cursor" }; + static const char * const names[] = { "control", "cursor" }; struct virtio_gpu_device *vgdev; /* this will expand later */ diff --git a/drivers/misc/mic/card/mic_virtio.c b/drivers/misc/mic/card/mic_virtio.c index e486a0c..f6ed57d 100644 --- a/drivers/misc/mic/card/mic_virtio.c +++ b/drivers/misc/mic/card/mic_virtio.c @@ -311,7 +311,7 @@ unmap: static int mic_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]) + const char * const names[]) { struct mic_vdev *mvdev = to_micvdev(vdev); struct mic_device_ctrl __iomem *dc = mvdev->dc; diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index e1a1023..e44872f 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -147,7 +147,7 @@ static void rproc_virtio_del_vqs(struct virtio_device *vdev) static int rproc_virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]) + const char * const names[]) { struct rproc *rproc = vdev_to_rproc(vdev); int i, ret; diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 73354ee..1fcd27c 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -945,7 +945,7 @@ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len, static int rpmsg_probe(struct virtio_device *vdev) { vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done }; - const char *names[] = { "input", "output" }; + static const char * const names[] = { "input", "output" }; struct virtqueue *vqs[2]; struct virtproc_info *vrp; void *bufs_va; diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 53fb975..1d060fd 100644 --- a/drivers/s390/virtio/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c @@ -255,7 +255,7 @@ static void kvm_del_vqs(struct virtio_device *vdev) static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]) + const char * const names[]) { struct kvm_device *kdev = to_kvmdev(vdev); int i; diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index 1b83159..bf2d130 100644 --- a/drivers/s390/virtio/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c @@ -635,7 +635,7 @@ out: static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs, 
struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]) + const char * const names[]) { struct virtio_ccw_device *vcdev = to_vc_device(vdev); unsigned long *indicatorp = NULL; diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 7d3e5d0..0c3691f 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -388,7 +388,7 @@ static int init_vqs(struct virtio_balloon *vb) { struct virtqueue *vqs[3]; vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request }; - const char *names[] = { "inflate", "deflate", "stats" }; + static const char * const names[] = { "inflate", "deflate", "stats" }; int err, nvqs; /* diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index c96944b..350a2a5 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -170,7 +170,7 @@ static int virtinput_init_vqs(struct virtio_input *vi) struct virtqueue *vqs[2]; vq_callback_t *cbs[] = { virtinput_recv_events, virtinput_recv_status }; - static const char *names[] = { "events", "status" }; + static const char * const names[] = { "events", "status" }; int err; err = vi->vdev->config->find_vqs(vi->vdev, 2, vqs, cbs, names); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index f499d9d..745c6ee 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -482,7 +482,7 @@ error_available: static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]) + const char * const names[]) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); unsigned int irq = platform_get_irq(vm_dev->pdev, 0); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 78f804a..36205c2 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -296,7 +296,7 @@ void vp_del_vqs(struct virtio_device *vdev) static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[], + const char * const names[], bool use_msix, bool per_vq_vectors) { @@ -376,7 +376,7 @@ error_find: int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]) + const char * const names[]) { int err; diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index b976d96..2cc2522 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -139,7 +139,7 @@ void vp_del_vqs(struct virtio_device *vdev); int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]); + const char * const names[]); const char *vp_bus_name(struct virtio_device *vdev); /* Setup the affinity for a virtqueue: diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 8e5cf19..c0c11fa 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -418,7 +418,7 @@ err_new_queue: static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]) + const char * const names[]) { struct virtio_pci_device *vp_dev = to_vp_device(vdev); struct virtqueue *vq; diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index e5ce8ab..6e6cb0c 100644 --- 
a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -70,7 +70,7 @@ struct virtio_config_ops { int (*find_vqs)(struct virtio_device *, unsigned nvqs, struct virtqueue *vqs[], vq_callback_t *callbacks[], - const char *names[]); + const char * const names[]); void (*del_vqs)(struct virtio_device *); u64 (*get_features)(struct virtio_device *vdev); int (*finalize_features)(struct virtio_device *vdev); -- cgit v1.1 From 402c2553a24729688ba32453a9f6889e12754147 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 4 Jan 2016 09:39:01 +0200 Subject: checkpatch.pl: add missing memory barriers Checks for SMP-only barriers were missing in checkpatch.pl. Refactor the code slightly to make adding more variants easier. Signed-off-by: Michael S. Tsirkin Acked-by: Julian Calaby Acked-by: Joe Perches Acked-by: Peter Zijlstra (Intel) --- scripts/checkpatch.pl | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 2b3c228..94b4e33 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5116,7 +5116,27 @@ sub process { } } # check for memory barriers without a comment. - if ($line =~ /\b(mb|rmb|wmb|read_barrier_depends|smp_mb|smp_rmb|smp_wmb|smp_read_barrier_depends)\(/) { + + my $barriers = qr{ + mb| + rmb| + wmb| + read_barrier_depends + }x; + my $barrier_stems = qr{ + mb__before_atomic| + mb__after_atomic| + store_release| + load_acquire| + store_mb| + (?:$barriers) + }x; + my $all_barriers = qr{ + (?:$barriers)| + smp_(?:$barrier_stems) + }x; + + if ($line =~ /\b(?:$all_barriers)\s*\(/) { if (!ctx_has_comment($first_line, $linenr)) { WARN("MEMORY_BARRIER", "memory barrier without comment\n" . $herecurr); -- cgit v1.1 From f4073b0f6e11a5436d1162c623401c8f2f7cf783 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 4 Jan 2016 09:54:51 +0200 Subject: checkpatch: check for __smp outside barrier.h The introduction of __smp barriers cleans up a bunch of duplicate code, but it gives people an additional handle onto a "new" set of barriers; just because they're prefixed with __* unfortunately doesn't stop anyone from using them (as happened with other arch code before). Add a checkpatch test so any such use triggers a warning. Reported-by: Russell King Signed-off-by: Michael S. Tsirkin Acked-by: Julian Calaby Acked-by: Joe Perches Acked-by: Peter Zijlstra (Intel) --- scripts/checkpatch.pl | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 94b4e33..25476c2 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5143,6 +5143,16 @@ sub process { } } + my $underscore_smp_barriers = qr{__smp_(?:$barrier_stems)}x; + + if ($realfile !~ m@^include/asm-generic/@ && + $realfile !~ m@/barrier\.h$@ && + $line =~ m/\b(?:$underscore_smp_barriers)\s*\(/ && + $line !~ m/^.\s*\#\s*define\s+(?:$underscore_smp_barriers)\s*\(/) { + WARN("MEMORY_BARRIER", + "__smp memory barriers shouldn't be used outside barrier.h and asm-generic\n" . $herecurr); + } + # check for waitqueue_active without a comment. if ($line =~ /\bwaitqueue_active\s*\(/) { if (!ctx_has_comment($first_line, $linenr)) { -- cgit v1.1 From 43e361f23c49dbddf74f56ddf6cdd85c5dbff6da Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 4 Jan 2016 10:00:10 +0200 Subject: checkpatch: add virt barriers Add virt_ barriers to the list of barriers checked for the presence of a comment. Signed-off-by: Michael S.
Tsirkin Acked-by: Julian Calaby Acked-by: Joe Perches Acked-by: Peter Zijlstra (Intel) --- scripts/checkpatch.pl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 25476c2..c7bf1aa 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5133,7 +5133,8 @@ sub process { }x; my $all_barriers = qr{ (?:$barriers)| - smp_(?:$barrier_stems) + smp_(?:$barrier_stems)| + virt_(?:$barrier_stems) }x; if ($line =~ /\b(?:$all_barriers)\s*\(/) { -- cgit v1.1
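To illustrate how the pieces above fit together, here is a minimal sketch of a guest-side producer for a guest/host shared ring. It is not taken from any of the patches: the demo_ring layout and its field names (prod, cons, kick_on_prod, data) are invented for this example; only READ_ONCE()/WRITE_ONCE() and the virt_wmb()/virt_mb() primitives come from the series. The point of virt_xxx here is that on a CONFIG_SMP=n guest the smp_ variants would compile away, which is wrong when the host is SMP, while plain mb()/wmb() would be needlessly heavy; virt_xxx gives the cheap SMP orderings unconditionally. Note that every barrier carries a comment, which is what the checkpatch.pl changes above now expect for virt_ barriers as well.

#include <linux/compiler.h>	/* READ_ONCE, WRITE_ONCE */
#include <linux/types.h>	/* bool, u32 */
#include <asm/barrier.h>	/* virt_wmb, virt_mb */

#define DEMO_RING_SIZE 64

/* Hypothetical ring shared by a (possibly UP) guest and an SMP host. */
struct demo_ring {
	u32 prod;		/* written by guest */
	u32 cons;		/* written by host */
	u32 kick_on_prod;	/* host: "kick me when you publish this index" */
	u32 data[DEMO_RING_SIZE];
};

/* Returns -1 if the ring is full; on success sets *notify if the host
 * asked to be kicked for the entry we just published. */
static int demo_ring_push(struct demo_ring *r, u32 v, bool *notify)
{
	u32 prod = r->prod;	/* the guest is the only writer of prod */

	if (prod - READ_ONCE(r->cons) >= DEMO_RING_SIZE)
		return -1;

	r->data[prod % DEMO_RING_SIZE] = v;

	/* Host must not observe the new producer index before the payload. */
	virt_wmb();
	WRITE_ONCE(r->prod, prod + 1);

	/*
	 * Host must observe the index update before we check its kick
	 * request; the same pattern as RING_PUSH_REQUESTS_AND_CHECK_NOTIFY
	 * above.
	 */
	virt_mb();
	*notify = READ_ONCE(r->kick_on_prod) == prod;

	return 0;
}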