Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                |  1 +
-rw-r--r--  arch/x86/include/asm/syscall.h  |  2 ++
-rw-r--r--  arch/x86/kernel/ftrace.c        | 36 ++++++++++++++++++++++++++++----------
3 files changed, 29 insertions(+), 10 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index eb40925..0896008 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -45,6 +45,7 @@ config X86
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select USER_STACKTRACE_SUPPORT
+	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_DMA_API_DEBUG
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_BZIP2
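
The new HAVE_REGS_AND_STACK_ACCESS_API select advertises that x86 implements the register and stack accessors in <asm/ptrace.h> that kprobes-based tracers consume. A minimal sketch of a consumer, assuming the standard helpers regs_query_register_offset(), regs_get_register() and regs_get_kernel_stack_nth(); the fetch function itself is purely illustrative:

#include <linux/ptrace.h>

/* Illustrative only: read a register by name plus a kernel stack slot. */
static unsigned long example_fetch(struct pt_regs *regs)
{
	int offs = regs_query_register_offset("ax");	/* -EINVAL if unknown */

	if (offs < 0)
		return 0;

	/* regs_get_kernel_stack_nth(): entry n of the kernel stack, 0 = top. */
	return regs_get_register(regs, offs) +
	       regs_get_kernel_stack_nth(regs, 2);
}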
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 8d33bc5..c4a348f 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -16,6 +16,8 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 
+extern const unsigned long sys_call_table[];
+
 /*
  * Only the low 32 bits of orig_ax are meaningful, so we return int.
  * This importantly ignores the high bits on 64-bit, so comparisons
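
Exporting sys_call_table here with its real type (an array of addresses, not a pointer) is what lets the x86-private arch_syscall_addr() at the bottom of this diff go away: the tracing core can carry one generic weak definition instead. Presumably along these lines (sketch of the generic replacement, not part of this diff):

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}

Note that the removed x86 version declared the table as 'unsigned long *' and indexed through '(&sys_call_table)[nr]', which only worked because syscall table entries happen to be pointer-sized; the array declaration makes plain sys_call_table[nr] indexing correct by construction.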
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 3096892..cd37469 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -30,14 +30,32 @@
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+/*
+ * modifying_code is set to notify NMIs that they need to use
+ * memory barriers when entering or exiting. But we don't want
+ * to burden NMIs with unnecessary memory barriers when code
+ * modification is not being done (which is most of the time).
+ *
+ * A mutex is already held when ftrace_arch_code_modify_prepare
+ * and post_process are called. No locks need to be taken here.
+ *
+ * Stop machine will make sure currently running NMIs are done
+ * and new NMIs will see the updated variable before we need
+ * to worry about NMIs doing memory barriers.
+ */
+static int modifying_code __read_mostly;
+static DEFINE_PER_CPU(int, save_modifying_code);
+
 int ftrace_arch_code_modify_prepare(void)
 {
 	set_kernel_text_rw();
+	modifying_code = 1;
 	return 0;
 }
 
 int ftrace_arch_code_modify_post_process(void)
 {
+	modifying_code = 0;
 	set_kernel_text_ro();
 	return 0;
 }
@@ -149,6 +167,11 @@ static void ftrace_mod_code(void)
 void ftrace_nmi_enter(void)
 {
+	__get_cpu_var(save_modifying_code) = modifying_code;
+
+	if (!__get_cpu_var(save_modifying_code))
+		return;
+
 	if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
 		smp_rmb();
 		ftrace_mod_code();
@@ -160,6 +183,9 @@ void ftrace_nmi_enter(void)
 void ftrace_nmi_exit(void)
 {
+	if (!__get_cpu_var(save_modifying_code))
+		return;
+
 	/* Finish all executions before clearing nmi_running */
 	smp_mb();
 	atomic_dec(&nmi_running);
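
Why snapshot the flag into save_modifying_code instead of re-reading the global in ftrace_nmi_exit()? Because enter and exit must make the same decision, or nmi_running is left unbalanced. A hypothetical interleaving with a re-read, written out as a comment:

/*
 *   CPU0 (in NMI)                        CPU1 (updater)
 *   ftrace_nmi_enter()
 *     reads modifying_code == 1
 *     atomic_inc(&nmi_running)
 *                                        stop_machine() returns
 *                                        modifying_code = 0
 *   ftrace_nmi_exit()
 *     re-reads modifying_code == 0
 *     returns early: the atomic_dec()
 *     is skipped and nmi_running leaks
 *
 * The per-CPU snapshot taken at NMI entry makes exit follow whatever
 * path entry chose, so the inc/dec always pair up.
 */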
@@ -484,13 +510,3 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 	}
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-
-#ifdef CONFIG_FTRACE_SYSCALLS
-
-extern unsigned long *sys_call_table;
-
-unsigned long __init arch_syscall_addr(int nr)
-{
-	return (unsigned long)(&sys_call_table)[nr];
-}
-#endif
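
For context, the nmi_running handshake that ftrace_nmi_enter()/ftrace_nmi_exit() take part in: the updater sets MOD_CODE_WRITE_FLAG (the top bit of nmi_running), and any NMI arriving mid-update performs the write itself via ftrace_mod_code(), so no CPU ever executes half-patched text. Reconstructed from the arch/x86 ftrace.c of this era as a sketch, not a verbatim quote:

#define MOD_CODE_WRITE_FLAG	(1 << 31)	/* top bit of nmi_running */
static atomic_t nmi_running = ATOMIC_INIT(0);

/* Updater side: claim the write flag once no NMI is in flight. */
static void wait_for_nmi_and_set_mod_flag(void)
{
	/* Fast path: nmi_running == 0, set the flag in one cmpxchg. */
	if (!atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG))
		return;

	/* Slow path: retry until only the flag bit remains set. */
	do {
		atomic_cmpxchg(&nmi_running, 0, MOD_CODE_WRITE_FLAG);
	} while (atomic_read(&nmi_running) != MOD_CODE_WRITE_FLAG);
}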