From 10013ebb5d7856c243541870f4e62fed68253e88 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Thu, 22 Oct 2015 15:07:20 -0700
Subject: x86: Add an inlined __copy_from_user_nmi() variant

Add an inlined __ variant of copy_from_user_nmi. The inlined variant allows
the user to:

- batch the access_ok() check for multiple accesses

- avoid having a pagefault_disable/enable() on every access if the caller
  already ensures disabled page faults due to its context.

- get all the optimizations in copy_*_user() for small, constant-sized
  transfers.

It is just a define to __copy_from_user_inatomic().

Signed-off-by: Andi Kleen
Signed-off-by: Peter Zijlstra (Intel)
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Thomas Gleixner
Link: http://lkml.kernel.org/r/1445551641-13379-1-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/uaccess.h | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 09b1b0a..660458a 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -745,5 +745,14 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 
 #undef __copy_from_user_overflow
 #undef __copy_to_user_overflow
 
+/*
+ * We rely on the nested NMI work to allow atomic faults from the NMI path; the
+ * nested NMI paths are careful to preserve CR2.
+ *
+ * The caller must use pagefault_disable/enable(), or run in interrupt context,
+ * and must also have done an access_ok() check.
+ */
+#define __copy_from_user_nmi __copy_from_user_inatomic
+
 #endif /* _ASM_X86_UACCESS_H */
--
cgit v1.1
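As a rough illustration (not part of the patch), a caller that already runs
in NMI context and has validated the user pointer could use the new define
like this; the helper name copy_user_frame() and the struct stack_frame
usage are only assumptions of this sketch:

    /*
     * Illustrative sketch: copy one user stack frame from NMI context.
     * Page faults are implicitly disabled because we are in an NMI, and
     * access_ok() is checked once up front, so the inlined
     * __copy_from_user_nmi() needs no further per-access checks.
     */
    #include <linux/errno.h>
    #include <linux/uaccess.h>
    #include <asm/stacktrace.h>

    static int copy_user_frame(const void __user *fp,
                               struct stack_frame *frame)
    {
            if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
                    return -EFAULT;

            /* Returns the number of bytes that could not be copied. */
            if (__copy_from_user_nmi(frame, fp, sizeof(*frame)))
                    return -EFAULT;

            return 0;
    }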
From 24cc12b17679f8e9046746f92fd377f589efc163 Mon Sep 17 00:00:00 2001
From: Takao Indoh
Date: Wed, 4 Nov 2015 14:22:32 +0900
Subject: perf/x86/intel/pt: Add interface to stop Intel PT logging

This patch adds a function for external components to stop Intel PT.
Basically this function is used when a kernel panic occurs. When it is
called, the intel_pt driver disables Intel PT and saves its registers
using pt_event_stop(), which is also used by the pmu.stop handler.

This function stops Intel PT only on the CPU on which it is called;
therefore users need to call it on each CPU to stop all logging.

Signed-off-by: Takao Indoh
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: H. Peter Anvin
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Cc: Vivek Goyal
Link: http://lkml.kernel.org/r/1446614553-6072-2-git-send-email-indou.takao@jp.fujitsu.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/intel_pt.h | 10 ++++++++++
 1 file changed, 10 insertions(+)
 create mode 100644 arch/x86/include/asm/intel_pt.h

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/intel_pt.h b/arch/x86/include/asm/intel_pt.h
new file mode 100644
index 0000000..e1a4117
--- /dev/null
+++ b/arch/x86/include/asm/intel_pt.h
@@ -0,0 +1,10 @@
+#ifndef _ASM_X86_INTEL_PT_H
+#define _ASM_X86_INTEL_PT_H
+
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
+void cpu_emergency_stop_pt(void);
+#else
+static inline void cpu_emergency_stop_pt(void) {}
+#endif
+
+#endif /* _ASM_X86_INTEL_PT_H */
--
cgit v1.1
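As a rough sketch (not part of the patch), a crash-handling path that wants
to stop all logging would have to invoke cpu_emergency_stop_pt() once per
CPU, since the function only acts on the CPU it runs on. A kdump-style
caller would typically do this from its NMI shootdown callback; the
simplified helper below uses a cross-CPU function call instead and is only
meant to show the per-CPU requirement. The function names other than
cpu_emergency_stop_pt() are assumptions of this sketch:

    #include <linux/smp.h>
    #include <asm/intel_pt.h>

    /* Runs on one CPU and stops Intel PT there only. */
    static void stop_this_cpu_pt(void *unused)
    {
            cpu_emergency_stop_pt();
    }

    /* Stop Intel PT logging everywhere: other CPUs via IPI, then locally. */
    static void stop_pt_on_all_cpus(void)
    {
            smp_call_function(stop_this_cpu_pt, NULL, 1);
            cpu_emergency_stop_pt();
    }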
From 153a4334c439cfb62e1d31cee0c790ba4157813d Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 1 Dec 2015 17:00:57 -0800
Subject: x86/headers: Don't include asm/processor.h in asm/atomic.h

asm/atomic.h doesn't really need asm/processor.h anymore. Everything it
uses has moved to other header files. So remove that include.

processor.h is a nasty header that includes lots of other headers, which
makes it prone to include loops. Removing the include here makes
asm/atomic.h a "leaf" header that can be safely included in most other
headers.

The only fallout is in the lib/atomic tester, which relied on this
implicit include. Give it an explicit include. (The include is inside an
ifdef because its user is also inside an ifdef.)

Signed-off-by: Andi Kleen
Signed-off-by: Peter Zijlstra (Intel)
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/1449018060-1742-1-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/atomic.h      | 1 -
 arch/x86/include/asm/atomic64_32.h | 1 -
 2 files changed, 2 deletions(-)

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ae5fb83..3e86742 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -3,7 +3,6 @@
 
 #include 
 #include 
-#include <asm/processor.h>
 #include 
 #include 
 #include 
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index a11c30b..a984111 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -3,7 +3,6 @@
 
 #include 
 #include 
-#include <asm/processor.h>
 //#include 
 
 /* An 64bit atomic type */
--
cgit v1.1

From 7f47d8cc039f8746e0038fe05f1ddcb15a2e27f0 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 1 Dec 2015 17:00:59 -0800
Subject: x86, tracing, perf: Add trace point for MSR accesses

For debugging low-level code that interacts with the CPU it is often
useful to trace MSR reads/writes. This gives a concise summary of PMU
and other operations.

perf has an ad-hoc way to do this using trace_printk, but it's somewhat
limited (and also now spews ugly boot messages when enabled).

Instead, define real trace points for all MSR accesses.

This adds three new trace points: read_msr, write_msr and rdpmc. They
also report whether the access faulted (if a *_safe variant is used).

This allows filtering and triggering on specific MSR values, which
enables various more advanced debugging techniques. All the values are
well defined in the CPU documentation.

The trace can be post-processed with
Documentation/trace/postprocess/decode_msr.py to add symbolic MSR names
to the trace.

I only added the trace points to the native MSR accesses in C, not to
paravirtualized accesses or to entry*.S (which is not too interesting).

Originally the patch kit moved the MSR accesses out of line. This version
uses an alternative approach recommended by Steven Rostedt: only the
trace calls are moved out of line, while the access to the jump label is
open-coded.

Signed-off-by: Andi Kleen
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Steven Rostedt
Cc: Arnaldo Carvalho de Melo
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Mike Galbraith
Cc: Peter Zijlstra
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Link: http://lkml.kernel.org/r/1449018060-1742-3-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/msr-trace.h | 57 ++++++++++++++++++++++++++++++++++++++++
 arch/x86/include/asm/msr.h       | 31 ++++++++++++++++++++++
 2 files changed, 88 insertions(+)
 create mode 100644 arch/x86/include/asm/msr-trace.h

(limited to 'arch/x86/include/asm')

diff --git a/arch/x86/include/asm/msr-trace.h b/arch/x86/include/asm/msr-trace.h
new file mode 100644
index 0000000..7567225
--- /dev/null
+++ b/arch/x86/include/asm/msr-trace.h
@@ -0,0 +1,57 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msr
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE msr-trace
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/
+
+#if !defined(_TRACE_MSR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSR_H
+
+#include 
+
+/*
+ * Tracing for x86 model specific registers. Directly maps to the
+ * RDMSR/WRMSR instructions.
+ */
+
+DECLARE_EVENT_CLASS(msr_trace_class,
+	TP_PROTO(unsigned msr, u64 val, int failed),
+	TP_ARGS(msr, val, failed),
+	TP_STRUCT__entry(
+		__field(unsigned, msr)
+		__field(u64, val)
+		__field(int, failed)
+	),
+	TP_fast_assign(
+		__entry->msr = msr;
+		__entry->val = val;
+		__entry->failed = failed;
+	),
+	TP_printk("%x, value %llx%s",
+		__entry->msr,
+		__entry->val,
+		__entry->failed ? " #GP" : "")
+);
+
+DEFINE_EVENT(msr_trace_class, read_msr,
+	TP_PROTO(unsigned msr, u64 val, int failed),
+	TP_ARGS(msr, val, failed)
+);
+
+DEFINE_EVENT(msr_trace_class, write_msr,
+	TP_PROTO(unsigned msr, u64 val, int failed),
+	TP_ARGS(msr, val, failed)
+);
+
+DEFINE_EVENT(msr_trace_class, rdpmc,
+	TP_PROTO(unsigned msr, u64 val, int failed),
+	TP_ARGS(msr, val, failed)
+);
+
+#endif /* _TRACE_MSR_H */
+
+/* This part must be outside protection */
+#include 
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 77d8b28..fedd6e6 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -57,11 +57,34 @@ static inline unsigned long long native_read_tscp(unsigned int *aux)
 #define EAX_EDX_RET(val, low, high)	"=A" (val)
 #endif
 
+#ifdef CONFIG_TRACEPOINTS
+/*
+ * Be very careful with includes. This header is prone to include loops.
+ */
+#include 
+#include 
+
+extern struct tracepoint __tracepoint_read_msr;
+extern struct tracepoint __tracepoint_write_msr;
+extern struct tracepoint __tracepoint_rdpmc;
+#define msr_tracepoint_active(t) static_key_false(&(t).key)
+extern void do_trace_write_msr(unsigned msr, u64 val, int failed);
+extern void do_trace_read_msr(unsigned msr, u64 val, int failed);
+extern void do_trace_rdpmc(unsigned msr, u64 val, int failed);
+#else
+#define msr_tracepoint_active(t) false
+static inline void do_trace_write_msr(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_read_msr(unsigned msr, u64 val, int failed) {}
+static inline void do_trace_rdpmc(unsigned msr, u64 val, int failed) {}
+#endif
+
 static inline unsigned long long native_read_msr(unsigned int msr)
 {
 	DECLARE_ARGS(val, low, high);
 
 	asm volatile("rdmsr" : EAX_EDX_RET(val, low, high) : "c" (msr));
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);
 }
 
@@ -78,6 +101,8 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 		     _ASM_EXTABLE(2b, 3b)
 		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
 		     : "c" (msr), [fault] "i" (-EIO));
+	if (msr_tracepoint_active(__tracepoint_read_msr))
+		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
 	return EAX_EDX_VAL(val, low, high);
 }
 
@@ -85,6 +110,8 @@ static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
 	asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
+	if (msr_tracepoint_active(__tracepoint_write_msr))
+		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
 }
 
 /* Can be uninlined because referenced by paravirt */
@@ -102,6 +129,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
 		     : "c" (msr), "0" (low), "d" (high),
 		       [fault] "i" (-EIO)
 		     : "memory");
+	if (msr_tracepoint_active(__tracepoint_write_msr))
+		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
 	return err;
 }
 
@@ -160,6 +189,8 @@ static inline unsigned long long native_read_pmc(int counter)
 	DECLARE_ARGS(val, low, high);
 
 	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
+	if (msr_tracepoint_active(__tracepoint_rdpmc))
+		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
 	return EAX_EDX_VAL(val, low, high);
 }
 
--
cgit v1.1
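As a closing illustration (not part of any of the patches above), built-in
kernel debug code could consume the new write_msr trace point directly by
registering a probe on it, instead of going through the trace buffer. The
direct include of asm/msr-trace.h, the register_trace_write_msr() name
generated by the event macros, and the choice of MSR 0x38f
(IA32_PERF_GLOBAL_CTRL) are assumptions of this sketch:

    #include <linux/kernel.h>
    #include <linux/init.h>
    #include <linux/tracepoint.h>
    #include <asm/msr-trace.h>

    /* Probe signature: the void *data cookie plus the TP_PROTO arguments. */
    static void msr_watch_probe(void *data, unsigned msr, u64 val, int failed)
    {
            if (msr == 0x38f)
                    pr_info("wrmsr %x <- %llx%s\n", msr, val,
                            failed ? " (faulted)" : "");
    }

    static int __init msr_watch_init(void)
    {
            /* Attach to the write_msr trace point declared in msr-trace.h. */
            return register_trace_write_msr(msr_watch_probe, NULL);
    }
    late_initcall(msr_watch_init);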