From 361c564eeff4b78f1303b86e8e8f07fc547bd2c9 Mon Sep 17 00:00:00 2001
From: Arnaldo Carvalho de Melo
Date: Thu, 30 Apr 2015 12:33:22 -0300
Subject: perf tools: Move x86 barrier.h stuff to tools/arch/x86/include/asm/barrier.h

We will need it for atomic.h, so move it from the ad-hoc tools/perf/
place to a tools/ subset of the kernel arch/ hierarchy.

Other arches will follow, each in a cset.

Cc: Adrian Hunter
Cc: Borislav Petkov
Cc: David Ahern
Cc: Don Zickus
Cc: Frederic Weisbecker
Cc: Jiri Olsa
Cc: Namhyung Kim
Cc: Stephane Eranian
Link: http://lkml.kernel.org/n/tip-vy6bqmsvm6puibpay2cy4wid@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo
---
 tools/arch/x86/include/asm/barrier.h | 28 ++++++++++++++++++++++++++++
 tools/include/asm/barrier.h          |  3 +++
 tools/perf/MANIFEST                  |  2 ++
 tools/perf/perf-sys.h                |  7 +------
 4 files changed, 34 insertions(+), 6 deletions(-)
 create mode 100644 tools/arch/x86/include/asm/barrier.h
 create mode 100644 tools/include/asm/barrier.h

diff --git a/tools/arch/x86/include/asm/barrier.h b/tools/arch/x86/include/asm/barrier.h
new file mode 100644
index 0000000..f366d8e
--- /dev/null
+++ b/tools/arch/x86/include/asm/barrier.h
@@ -0,0 +1,28 @@
+#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
+#define _TOOLS_LINUX_ASM_X86_BARRIER_H
+
+/*
+ * Copied from the Linux kernel sources, and also moving code
+ * out from tools/perf/perf-sys.h so as to make it be located
+ * in a place similar as in the kernel sources.
+ *
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#if defined(__i386__)
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#elif defined(__x86_64__)
+#define mb() asm volatile("mfence":::"memory")
+#define rmb() asm volatile("lfence":::"memory")
+#define wmb() asm volatile("sfence" ::: "memory")
+#endif
+
+#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
new file mode 100644
index 0000000..9a55c12
--- /dev/null
+++ b/tools/include/asm/barrier.h
@@ -0,0 +1,3 @@
+#if defined(__i386__) || defined(__x86_64__)
+#include "../../arch/x86/include/asm/barrier.h"
+#endif
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 11ccbb2..594737a 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,4 +1,5 @@
 tools/perf
+tools/arch/x86/include/asm/barrier.h
 tools/scripts
 tools/build
 tools/lib/traceevent
@@ -6,6 +7,7 @@ tools/lib/api
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
+tools/include/asm/barrier.h
 tools/include/asm/bug.h
 tools/include/asm-generic/bitops/arch_hweight.h
 tools/include/asm-generic/bitops/atomic.h
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 6ef6816..781d441 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -6,11 +6,9 @@
 #include <sys/syscall.h>
 #include <linux/types.h>
 #include <linux/perf_event.h>
+#include <asm/barrier.h>
 
 #if defined(__i386__)
-#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC {"model name"}
 #ifndef __NR_perf_event_open
@@ -25,9 +23,6 @@
 #endif
 
 #if defined(__x86_64__)
-#define mb() asm volatile("mfence" ::: "memory")
-#define wmb() asm volatile("sfence" ::: "memory")
-#define rmb() asm volatile("lfence" ::: "memory")
 #define cpu_relax() asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC {"model name"}
 #ifndef __NR_perf_event_open
--
cgit v1.1
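For context, the barriers this patch relocates are what tools/perf relies on when it consumes the kernel's mmap'ed event ring buffer: the reader must order its load of data_head before it reads any records, and order those reads before it publishes data_tail back to the kernel. The sketch below is a minimal, self-contained illustration of that pattern; it is not code from this patch, the helper names are made up, and it assumes tools/include/ is on the include path so that <asm/barrier.h> resolves to the header added here.

	#include <stdint.h>
	#include <linux/perf_event.h>	/* struct perf_event_mmap_page */
	#include <asm/barrier.h>	/* mb()/rmb() from the moved header */

	/* Hypothetical reader-side helpers for the perf mmap ring buffer. */
	static inline uint64_t ring_read_head(struct perf_event_mmap_page *pc)
	{
		uint64_t head = *(volatile uint64_t *)&pc->data_head;
		rmb();	/* don't read ring data before the head value */
		return head;
	}

	static inline void ring_write_tail(struct perf_event_mmap_page *pc, uint64_t tail)
	{
		mb();	/* finish reading ring data before letting the kernel reuse it */
		pc->data_tail = tail;
	}

On x86_64 the rmb() above compiles to lfence and the mb() to mfence, exactly the definitions in the new tools/arch/x86/include/asm/barrier.h.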