From c608906165355089a4de3c9133c72e81e011096c Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 24 Nov 2017 23:54:22 +0000
Subject: ARM: probes: avoid adding kprobes to sensitive kernel-entry/exit code

Avoid adding kprobes to any of the kernel entry/exit or startup
assembly code, or code in the identity-mapped region.  This code does
not conform to the standard C conventions, which means that the
expectations of the kprobes code are not fulfilled.  Placing kprobes
at some of these locations results in the kernel trying to return to
userspace addresses while retaining the CPU in kernel mode.

Tested-by: Naresh Kamboju
Signed-off-by: Russell King
---
 arch/arm/kernel/entry-armv.S      |  6 +-----
 arch/arm/kernel/entry-common.S    |  1 +
 arch/arm/kernel/stacktrace.c      | 14 ++------------
 arch/arm/kernel/traps.c           |  4 ++--
 arch/arm/kernel/vmlinux-xip.lds.S |  6 +++---
 arch/arm/kernel/vmlinux.lds.S     |  6 +++---
 6 files changed, 12 insertions(+), 25 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fbc7076..1752033 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -82,11 +82,7 @@
 #endif
 	.endm

-#ifdef CONFIG_KPROBES
-	.section	.kprobes.text,"ax",%progbits
-#else
-	.text
-#endif
+	.section .entry.text,"ax",%progbits

 /*
  * Invalid mode handlers
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index e655dcd..3c4f887 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -37,6 +37,7 @@ saved_pc	.req	lr
 #define TRACE(x...)
 #endif

+	.section .entry.text,"ax",%progbits
 	.align	5
 #if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
 /*
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 65228bf4..a56e7c8 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -3,6 +3,7 @@
 #include
 #include
+#include
 #include
 #include
@@ -63,7 +64,6 @@ EXPORT_SYMBOL(walk_stackframe);
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
 	struct stack_trace *trace;
-	unsigned long last_pc;
 	unsigned int no_sched_functions;
 	unsigned int skip;
 };
@@ -87,16 +87,7 @@ static int save_trace(struct stackframe *frame, void *d)
 	if (trace->nr_entries >= trace->max_entries)
 		return 1;

-	/*
-	 * in_exception_text() is designed to test if the PC is one of
-	 * the functions which has an exception stack above it, but
-	 * unfortunately what is in frame->pc is the return LR value,
-	 * not the saved PC value.  So, we need to track the previous
-	 * frame PC value when doing this.
-	 */
-	addr = data->last_pc;
-	data->last_pc = frame->pc;
-	if (!in_exception_text(addr))
+	if (!in_entry_text(frame->pc))
 		return 0;

 	regs = (struct pt_regs *)frame->sp;
@@ -114,7 +105,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
 	struct stackframe frame;

 	data.trace = trace;
-	data.last_pc = ULONG_MAX;
 	data.skip = trace->skip;
 	data.no_sched_functions = nosched;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5cf0488..e344bdd 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -72,7 +72,7 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n",
 		where, from);
 #endif

-	if (in_exception_text(where))
+	if (in_entry_text(from))
 		dump_mem("", "Exception stack", frame + 4,
 			 frame + 4 + sizeof(struct pt_regs));
 }
@@ -433,7 +433,7 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 	return fn ? fn(regs, instr) : 1;
 }

-asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+asmlinkage void do_undefinstr(struct pt_regs *regs)
 {
 	unsigned int instr;
 	siginfo_t info;
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index ec4b3f9..12b8759 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -96,9 +96,9 @@ SECTIONS
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			IDMAP_TEXT
-			__exception_text_start = .;
-			*(.exception.text)
-			__exception_text_end = .;
+			__entry_text_start = .;
+			*(.entry.text)
+			__entry_text_end = .;
 			IRQENTRY_TEXT
 			TEXT_TEXT
 			SCHED_TEXT
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index ee53f65..84a1ae3 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -105,9 +105,9 @@ SECTIONS
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			IDMAP_TEXT
-			__exception_text_start = .;
-			*(.exception.text)
-			__exception_text_end = .;
+			__entry_text_start = .;
+			*(.entry.text)
+			__entry_text_end = .;
 			IRQENTRY_TEXT
 			SOFTIRQENTRY_TEXT
 			TEXT_TEXT
--
cgit v1.1
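
[Editor's note] The in_entry_text() helper used in the stacktrace.c and
traps.c hunks above is conceptually a range test against the
__entry_text_start/__entry_text_end symbols that the linker-script hunks
emit around .entry.text.  A minimal sketch of such a check, assuming only
those two symbols (the kernel's actual helper may be written differently):

	#include <linux/types.h>

	/* Sketch only: section bounds exported by the linker script. */
	extern char __entry_text_start[];
	extern char __entry_text_end[];

	static inline bool in_entry_text(unsigned long addr)
	{
		/* True when addr lies inside the .entry.text section. */
		return addr >= (unsigned long)__entry_text_start &&
		       addr <  (unsigned long)__entry_text_end;
	}

Because frame->pc and the backtrace "from" value are genuine return
addresses into the entry code, a plain section-bounds test suffices and
the old last_pc bookkeeping can go away.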
From 670431eaacbfa30be59f4ea027a2d9662ddf2bff Mon Sep 17 00:00:00 2001
From: Jinbum Park
Date: Tue, 12 Dec 2017 01:20:51 +0100
Subject: ARM: 8733/1: hw_breakpoint: Mark variables as __ro_after_init

core_num_brps, core_num_wrps, debug_arch, has_ossr and
max_watchpoint_len are set up once during the init stage and never
changed after that, so they are good candidates for __ro_after_init.

Reviewed-by: Kees Cook
Signed-off-by: Jinbum Park
Signed-off-by: Russell King
---
 arch/arm/kernel/hw_breakpoint.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index af2a7f1..629e251 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -44,17 +44,17 @@ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
 static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

 /* Number of BRP/WRP registers on this CPU. */
-static int core_num_brps;
-static int core_num_wrps;
+static int core_num_brps __ro_after_init;
+static int core_num_wrps __ro_after_init;

 /* Debug architecture version. */
-static u8 debug_arch;
+static u8 debug_arch __ro_after_init;

 /* Does debug architecture support OS Save and Restore? */
-static bool has_ossr;
+static bool has_ossr __ro_after_init;

 /* Maximum supported watchpoint length. */
-static u8 max_watchpoint_len;
+static u8 max_watchpoint_len __ro_after_init;

 #define READ_WB_REG_CASE(OP2, M, VAL)		\
 	case ((OP2 << 4) + M):			\
--
cgit v1.1
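
[Editor's note] For reference, __ro_after_init (from <linux/cache.h>)
places a variable in the .data..ro_after_init section, which remains
writable through the init phase and, on kernels with strict kernel RWX
enabled, is remapped read-only before userspace starts.  A hedged sketch
with a made-up variable, not one from the patch above:

	#include <linux/cache.h>	/* __ro_after_init */
	#include <linux/init.h>

	/* Hypothetical example variable. */
	static unsigned int example_limit __ro_after_init;

	static int __init example_limit_setup(void)
	{
		example_limit = 16;	/* fine: init runs before the remap */
		return 0;
	}
	core_initcall(example_limit_setup);

Any write to example_limit after init would fault, giving const-like
protection for values that are only known at boot time.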
From 62d1c95d577ce4d40189e4c01025b616917e3c65 Mon Sep 17 00:00:00 2001
From: Vladimir Murzin
Date: Wed, 27 Dec 2017 10:38:55 +0100
Subject: ARM: 8739/1: NOMMU: Setup VBAR/Hivecs for secondaries cores

With the switch to dynamic exception base address setting, VBAR/Hivecs
are set up only for the boot CPU, while secondary CPUs stay unaware of
it.  That can lead to weird effects when trying to bring up
secondaries.

Fixes: ad475117d201 ("ARM: 8649/2: nommu: remove Hivecs configuration is asm")
Signed-off-by: Vladimir Murzin
Acked-by: afzal mohammed
Signed-off-by: Russell King
---
 arch/arm/kernel/smp.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b4fbf00..2da0879 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -379,6 +379,9 @@ asmlinkage void secondary_start_kernel(void)

 	cpu_init();

+#ifndef CONFIG_MMU
+	setup_vectors_base();
+#endif
 	pr_debug("CPU%u: Booted secondary processor\n", cpu);

 	preempt_disable();
--
cgit v1.1

From ff5fdafc9e9702846480e0cea55ba861f72140a2 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Fri, 19 Jan 2018 18:17:46 +0100
Subject: ARM: 8745/1: get rid of __memzero()

The __memzero assembly code is almost identical to memset's except for
two orr instructions.  The runtime performance of __memzero(p, n) and
memset(p, 0, n) is accordingly almost identical.

However, the memset() macro that guards against a zero length and
substitutes a call to __memzero at compile time when the fill value is
a constant zero interferes with compiler optimizations.

Arnd found that the test against a zero length brings up some new
warnings with gcc v8:

  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82103

And successively removing the test against a zero length and the
__memzero call optimization produces the following kernel sizes for
defconfig with gcc 6:

      text     data     bss       dec      hex  filename
  12248142  6278960  413588  18940690  1210312  vmlinux.orig
  12244474  6278960  413588  18937022  120f4be  vmlinux.no_zero_test
  12239160  6278960  413588  18931708  120dffc  vmlinux.no_memzero

So it is probably not worth keeping __memzero around given that the
compiler can do a better job at inlining trivial memset(p, 0, n) on its
own.  And the memset code already handles a zero length just fine.

Suggested-by: Arnd Bergmann
Signed-off-by: Nicolas Pitre
Acked-by: Ard Biesheuvel
Acked-by: Arnd Bergmann
Signed-off-by: Russell King
---
 arch/arm/kernel/armksyms.c    | 1 -
 arch/arm/kernel/head-common.S | 5 +++--
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm/kernel')

diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5266fd9..783fbb4 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -92,7 +92,6 @@ EXPORT_SYMBOL(__memset64);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(__memzero);

 EXPORT_SYMBOL(mmioset);
 EXPORT_SYMBOL(mmiocpy);
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 21dde77..6e0375e 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -105,8 +105,9 @@ __mmap_switched:
 ARM(	ldmia	r4!, {r0, r1, sp}	)
 THUMB(	ldmia	r4!, {r0, r1, r3}	)
 THUMB(	mov	sp, r3	)
-	sub	r1, r1, r0
-	bl	__memzero		@ clear .bss
+	sub	r2, r1, r0
+	mov	r1, #0
+	bl	memset			@ clear .bss

 	ldmia	r4, {r0, r1, r2, r3}
 	str	r9, [r0]		@ Save processor ID
--
cgit v1.1
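
[Editor's note] The head-common.S hunk above works because of the AAPCS
calling convention: memset() takes the buffer in r0, the fill byte in r1
and the length in r2, so the rewritten sequence builds the extra zero
argument before the call.  Expressed in C, the fixed-up code does roughly
the following (symbol names here follow the usual linker-script
convention but are illustrative; the real assembly loads the .bss bounds
from a data table via r4):

	#include <string.h>

	/* Illustrative: linker-provided bounds of the .bss section. */
	extern char __bss_start[], __bss_stop[];

	static void clear_bss(void)
	{
		/* r0 = start, r1 = 0 (fill byte), r2 = size */
		memset(__bss_start, 0, __bss_stop - __bss_start);
	}

Since memset already tolerates a zero length, no extra guard is needed,
and the compiler is free to inline trivial memset(p, 0, n) calls that the
old __memzero macro wrapper used to block.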