author     Filippo Arcidiacono <filippo.arcidiacono@st.com>   2012-04-19 15:45:57 +0900
committer  Paul Mundt <lethal@linux-sh.org>                   2012-04-19 15:45:57 +0900
commit     5d920bb929a99446062a48cf90867bbca57b8e77
tree       fdadebe0b0fe8906ffd81ad9f726430d6428a8f5 /arch
parent     932e9f352b5d685725076f21b237f7c7d804b29c
sh: initial stack protector support.
This implements basic -fstack-protector support, based on the early ARM
version in c743f38013aeff58ef6252601e397b5ba281c633. The SMP case is
limited to the initial canary value, while the UP case handles per-task
granularity (limited to 32-bit sh until a new enough sh64 compiler
manifests itself).
Signed-off-by: Filippo Arcidiacono <filippo.arcidiacono@st.com>
Reviewed-by: Carmelo Amoroso <carmelo.amoroso@st.com>
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
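
For readers unfamiliar with the mechanism: -fstack-protector makes GCC instrument selected functions with a canary check against the guard variable this patch defines. A rough C sketch of what the compiler-emitted prologue/epilogue amounts to follows (illustrative only; the function and buffer names are made up, and the real check is generated by the compiler, not written by hand):

	extern unsigned long __stack_chk_guard;	/* defined in arch/sh/kernel/process.c by this patch */
	extern void __stack_chk_fail(void);	/* generic kernel handler, panics on mismatch */

	void example(const char *src, unsigned long len)
	{
		unsigned long canary = __stack_chk_guard;	/* prologue: place a copy just below the return address */
		char buf[64];

		/* ... function body; an overflow of buf towards the return
		 * address has to walk over the canary first ... */

		if (canary != __stack_chk_guard)	/* epilogue: re-check before returning */
			__stack_chk_fail();
	}

On UP, the per-task granularity comes from the __switch_to() hook below reloading __stack_chk_guard with next->stack_canary; on SMP every CPU shares the single boot-time value set by boot_init_stack_canary().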
Diffstat (limited to 'arch')
-rw-r--r--   arch/sh/Kconfig                        14
-rw-r--r--   arch/sh/Makefile                        4
-rw-r--r--   arch/sh/include/asm/stackprotector.h   27
-rw-r--r--   arch/sh/kernel/process.c                7
-rw-r--r--   arch/sh/kernel/process_32.c             5
5 files changed, 57 insertions, 0 deletions
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index ff9e033..60ed366 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -685,6 +685,20 @@ config SECCOMP
 
 	  If unsure, say N.
 
+config CC_STACKPROTECTOR
+	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+	depends on SUPERH32 && EXPERIMENTAL
+	help
+	  This option turns on the -fstack-protector GCC feature. This
+	  feature puts, at the beginning of functions, a canary value on
+	  the stack just before the return address, and validates
+	  the value just before actually returning. Stack based buffer
+	  overflows (that need to overwrite this return address) now also
+	  overwrite the canary, which gets detected and the attack is then
+	  neutralized via a kernel panic.
+
+	  This feature requires gcc version 4.2 or above.
+
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on SYS_SUPPORTS_SMP
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 3fc0f41..24875c8 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -199,6 +199,10 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y)
   KBUILD_CFLAGS += -fasynchronous-unwind-tables
 endif
 
+ifeq ($(CONFIG_CC_STACKPROTECTOR),y)
+  KBUILD_CFLAGS += -fstack-protector
+endif
+
 libs-$(CONFIG_SUPERH32)	:= arch/sh/lib/	$(libs-y)
 libs-$(CONFIG_SUPERH64)	:= arch/sh/lib64/ $(libs-y)
 
diff --git a/arch/sh/include/asm/stackprotector.h b/arch/sh/include/asm/stackprotector.h
new file mode 100644
index 0000000..d9df3a7
--- /dev/null
+++ b/arch/sh/include/asm/stackprotector.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_SH_STACKPROTECTOR_H
+#define __ASM_SH_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+	unsigned long canary;
+
+	/* Try to get a semi random initial value. */
+	get_random_bytes(&canary, sizeof(canary));
+	canary ^= LINUX_VERSION_CODE;
+
+	current->stack_canary = canary;
+	__stack_chk_guard = current->stack_canary;
+}
+
+#endif /* __ASM_SH_STACKPROTECTOR_H */
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 325f98b..f3f03e4 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -2,10 +2,17 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/stackprotector.h>
 
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	*dst = *src;
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 94273aa..f78cc42 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -22,6 +22,7 @@
 #include <linux/ftrace.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/prefetch.h>
+#include <linux/stackprotector.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/fpu.h>
@@ -220,6 +221,10 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 {
 	struct thread_struct *next_t = &next->thread;
 
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	__stack_chk_guard = next->stack_canary;
+#endif
+
 	unlazy_fpu(prev, task_pt_regs(prev));
 
 	/* we're going to use this soon, after a few expensive things */
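
Worth noting for anyone applying this: the arch code above supplies only the guard variable, the boot-time canary, and the context-switch hook. The pieces it leans on live in generic code, roughly as follows in kernels of this era (quoted from memory, so treat it as a sketch rather than exact source):

	/* kernel/fork.c, dup_task_struct(): every new task gets its own canary,
	 * which the UP __switch_to() hook above copies into __stack_chk_guard. */
	#ifdef CONFIG_CC_STACKPROTECTOR
		tsk->stack_canary = get_random_int();
	#endif

	/* kernel/panic.c: called by the GCC-emitted epilogue when the on-stack
	 * copy no longer matches __stack_chk_guard. */
	void __stack_chk_fail(void)
	{
		panic("stack-protector: Kernel stack is corrupted in: %p\n",
		      __builtin_return_address(0));
	}
	EXPORT_SYMBOL(__stack_chk_fail);

boot_init_stack_canary() from the new header is pulled in through <linux/stackprotector.h> and called early in start_kernel(), so the guard is seeded before the instrumented kernel proper starts running.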