33 files changed, 39 insertions, 39 deletions
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index a4eb018..3534a84 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -480,7 +480,7 @@
 There are several features that need compiler support. The recommended way
 to describe the dependency on the compiler feature is to use "depends on"
 followed by a test macro.
-config CC_STACKPROTECTOR
+config STACKPROTECTOR
 	bool "Stack Protector buffer overflow detection"
 	depends on $(cc-option,-fstack-protector)
 	...
diff --git a/Documentation/security/self-protection.rst b/Documentation/security/self-protection.rst
index 0f53826..e1ca698 100644
--- a/Documentation/security/self-protection.rst
+++ b/Documentation/security/self-protection.rst
@@ -156,7 +156,7 @@
 The classic stack buffer overflow involves writing past the expected end
 of a variable stored on the stack, ultimately writing a controlled value
 to the stack frame's stored return address. The most widely used defense
 is the presence of a stack canary between the stack variables and the
-return address (``CONFIG_CC_STACKPROTECTOR``), which is verified just before
+return address (``CONFIG_STACKPROTECTOR``), which is verified just before
 the function returns. Other defenses include things like shadow stacks.
 Stack depth overflow
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -687,8 +687,8 @@ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
 stackp-flags-$(CONFIG_CC_HAS_STACKPROTECTOR_NONE) := -fno-stack-protector
-stackp-flags-$(CONFIG_CC_STACKPROTECTOR) := -fstack-protector
-stackp-flags-$(CONFIG_CC_STACKPROTECTOR_STRONG) := -fstack-protector-strong
+stackp-flags-$(CONFIG_STACKPROTECTOR) := -fstack-protector
+stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
 KBUILD_CFLAGS += $(stackp-flags-y)
diff --git a/arch/Kconfig b/arch/Kconfig
index ebbb450..c302b3d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -558,7 +558,7 @@ config HAVE_CC_STACKPROTECTOR
 config CC_HAS_STACKPROTECTOR_NONE
 	def_bool $(cc-option,-fno-stack-protector)
-config CC_STACKPROTECTOR
+config STACKPROTECTOR
 	bool "Stack Protector buffer overflow detection"
 	depends on HAVE_CC_STACKPROTECTOR
 	depends on $(cc-option,-fstack-protector)
@@ -582,9 +582,9 @@ config CC_STACKPROTECTOR
 	  about 3% of all kernel functions, which increases kernel code size
 	  by about 0.3%.
-config CC_STACKPROTECTOR_STRONG
+config STACKPROTECTOR_STRONG
 	bool "Strong Stack Protector"
-	depends on CC_STACKPROTECTOR
+	depends on STACKPROTECTOR
 	depends on $(cc-option,-fstack-protector-strong)
 	default y
 	help
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 27c5381..974d8d7 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -61,7 +61,7 @@ int main(void)
 {
 	DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
 #endif
 	BLANK();
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1752033..179a9f6 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -791,7 +791,7 @@ ENTRY(__switch_to)
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	ldr	r7, [r2, #TI_TASK]
 	ldr	r8, =__stack_chk_guard
 	.if (TSK_STACK_CANARY > IMM12_MASK)
@@ -807,7 +807,7 @@ ENTRY(__switch_to)
 	ldr	r0, =thread_notify_head
 	mov	r1, #THREAD_NOTIFY_SWITCH
 	bl	atomic_notifier_call_chain
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	str	r7, [r8]
 #endif
 	THUMB(	mov	ip, r4	)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1523cb1..225d1c5 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -39,7 +39,7 @@
 #include <asm/tls.h>
 #include <asm/vdso.h>
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index f08a2ed..e10bc363 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -59,7 +59,7 @@
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index c1cd414..cbe4742 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -83,7 +83,7 @@ void output_task_defines(void)
 	OFFSET(TASK_FLAGS, task_struct, flags);
 	OFFSET(TASK_MM, task_struct, mm);
 	OFFSET(TASK_PID, task_struct, pid);
-#if defined(CONFIG_CC_STACKPROTECTOR)
+#if defined(CONFIG_STACKPROTECTOR)
 	OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
 #endif
 	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index e42113f..896080b 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -61,7 +61,7 @@
 #endif
 3:
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 3775a8d..8d85046 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -180,7 +180,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
 	return 0;
 }
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 6658971..71b1aaf 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -36,7 +36,7 @@ LEAF(resume)
 	cpu_save_nonscratch a0
 	sw	ra, THREAD_REG31(a0)
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 17cf934..58232ae 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -31,7 +31,7 @@
 	cpu_save_nonscratch a0
 	LONG_S	ra, THREAD_REG31(a0)
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 68b1a67..4d1bfc8 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -12,7 +12,7 @@
 struct kmem_cache *task_xstate_cachep = NULL;
 unsigned int xstate_size;
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
 #endif
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 9352206..27fddb5 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -177,7 +177,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 {
 	struct thread_struct *next_t = &next->thread;
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	__stack_chk_guard = next->stack_canary;
 #endif
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index bef8e2b..2582881 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -239,7 +239,7 @@ ENTRY(__switch_to_asm)
 	movl	%esp, TASK_threadsp(%eax)
 	movl	TASK_threadsp(%edx), %esp
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	movl	TASK_stack_canary(%edx), %ebx
 	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
 #endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3166b96..73a522d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -357,7 +357,7 @@ ENTRY(__switch_to_asm)
 	movq	%rsp, TASK_threadsp(%rdi)
 	movq	TASK_threadsp(%rsi), %rsp
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	movq	TASK_stack_canary(%rsi), %rbx
 	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
 #endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index e28add6..cfd29ee 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -412,7 +412,7 @@ extern asmlinkage void ignore_sysret(void);
 void save_fsgs_for_kvm(void);
 #endif
 #else	/* X86_64 */
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 /*
  * Make sure stack canary segment base is cached-aligned:
  *   "For Intel Atom processors, avoid non zero segment base address
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 8f09012b..e293c12 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -146,7 +146,7 @@
 # define __KERNEL_PERCPU		0
 #endif
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 # define __KERNEL_STACK_CANARY		(GDT_ENTRY_STACK_CANARY*8)
 #else
 # define __KERNEL_STACK_CANARY		0
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index 371b3a4..8ec97a6 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -34,7 +34,7 @@
 #ifndef _ASM_STACKPROTECTOR_H
 #define _ASM_STACKPROTECTOR_H 1
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <asm/tsc.h>
 #include <asm/processor.h>
@@ -105,7 +105,7 @@ static inline void load_stack_canary_segment(void)
 #endif
 }
-#else	/* CC_STACKPROTECTOR */
+#else	/* STACKPROTECTOR */
 #define GDT_STACK_CANARY_INIT
@@ -121,5 +121,5 @@ static inline void load_stack_canary_segment(void)
 #endif
 }
-#endif	/* CC_STACKPROTECTOR */
+#endif	/* STACKPROTECTOR */
 #endif	/* _ASM_STACKPROTECTOR_H */
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 76417a9..dcb008c 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -32,7 +32,7 @@ void common(void)
 {
 	BLANK();
 	OFFSET(TASK_threadsp, task_struct, thread.sp);
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	OFFSET(TASK_stack_canary, task_struct, stack_canary);
 #endif
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index f91ba53..a4a3be3 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -50,7 +50,7 @@ void foo(void)
 	DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
 	       offsetofend(struct cpu_entry_area, entry_stack_page.stack));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	BLANK();
 	OFFSET(stack_canary_offset, stack_canary, canary);
 #endif
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index bf51e51..b2dcd16 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -69,7 +69,7 @@ int main(void)
 	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 	BLANK();
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	DEFINE(stack_canary_offset, offsetof(union irq_stack_union, stack_canary));
 	BLANK();
 #endif
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 910b47e..0df7151 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1599,7 +1599,7 @@ DEFINE_PER_CPU(unsigned long, cpu_current_top_of_stack) =
 	(unsigned long)&init_thread_union + THREAD_SIZE;
 EXPORT_PER_CPU_SYMBOL(cpu_current_top_of_stack);
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
 #endif
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index b59e4fb..abe6df1 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -375,7 +375,7 @@ ENDPROC(startup_32_smp)
  */
 __INIT
 setup_once:
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	/*
 	 * Configure the stack canary. The linker can't handle this by
 	 * relocation.  Manually set base address in stack canary
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index 022cf91..67904f5 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -76,7 +76,7 @@ int main(void)
 	DEFINE(TASK_PID, offsetof (struct task_struct, pid));
 	DEFINE(TASK_THREAD, offsetof (struct task_struct, thread));
 	DEFINE(TASK_THREAD_INFO, offsetof (struct task_struct, stack));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	DEFINE(TASK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
 #endif
 	DEFINE(TASK_STRUCT_SIZE, sizeof (struct task_struct));
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 5caff07..9cbc380 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1971,7 +1971,7 @@ ENTRY(_switch_to)
 	s32i	a1, a2, THREAD_SP	# save stack pointer
 #endif
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	movi	a6, __stack_chk_guard
 	l32i	a8, a3, TASK_STACK_CANARY
 	s32i	a8, a6, 0
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 8dd0593..483dcfb 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -58,7 +58,7 @@
 void (*pm_power_off)(void) = NULL;
 EXPORT_SYMBOL(pm_power_off);
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
 EXPORT_SYMBOL(__stack_chk_guard);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 16e4d98..cfb7da8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -742,7 +742,7 @@ struct task_struct {
 	pid_t				pid;
 	pid_t				tgid;
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	/* Canary value for the -fstack-protector GCC feature: */
 	unsigned long			stack_canary;
 #endif
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
index 03696c7..6b792d0 100644
--- a/include/linux/stackprotector.h
+++ b/include/linux/stackprotector.h
@@ -6,7 +6,7 @@
 #include <linux/sched.h>
 #include <linux/random.h>
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 # include <asm/stackprotector.h>
 #else
 static inline void boot_init_stack_canary(void)
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index 946fb92..81e9af7 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -12,7 +12,7 @@ CONFIG_BLK_DEV_DM=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_STACKPROTECTOR_STRONG=y
 CONFIG_COMPACTION=y
 CONFIG_CPU_SW_DOMAIN_PAN=y
 CONFIG_DM_CRYPT=y
diff --git a/kernel/fork.c b/kernel/fork.c
index 08c6e5e..92870be 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -811,7 +811,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	clear_tsk_need_resched(tsk);
 	set_task_stack_end_magic(tsk);
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 	tsk->stack_canary = get_random_canary();
 #endif
diff --git a/kernel/panic.c b/kernel/panic.c
index 42e4874..8b2e002 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -623,7 +623,7 @@ static __init int register_warn_debugfs(void)
 device_initcall(register_warn_debugfs);
 #endif
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
 /*
  * Called when gcc's -fstack-protector feature is used, and
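
What the renamed option enables has not changed: with CONFIG_STACKPROTECTOR (or CONFIG_STACKPROTECTOR_STRONG) set, the compiler instruments function prologues and epilogues, and the kernel's part is only to supply the reference canary and the failure handler touched by this patch. The following is a minimal hand-written C sketch of that contract, for illustration only -- the compare-and-call sequence is normally emitted by the compiler, and demo_copy() is a made-up example, not code from this series:

    #include <string.h>

    /* Supplied by the kernel: the guard word (declared in the arch
     * process.c hunks above) and the handler that panics on a mismatch
     * (defined under the same #ifdef in kernel/panic.c). */
    extern unsigned long __stack_chk_guard;
    extern void __stack_chk_fail(void);

    /* Hypothetical function, shown only to illustrate the generated checks. */
    void demo_copy(const char *src)
    {
            /* prologue: stash a copy of the canary between the local
             * variables and the saved return address */
            unsigned long canary = __stack_chk_guard;
            char buf[64];

            strcpy(buf, src);       /* an overflow tramples 'canary' first */

            /* epilogue: verify the canary before the return address is used */
            if (canary != __stack_chk_guard)
                    __stack_chk_fail();     /* does not return */
    }

Per the Kconfig help text quoted in the arch/Kconfig hunk, plain -fstack-protector instruments only functions with character arrays (about 3% of all kernel functions, roughly 0.3% code-size growth), while -fstack-protector-strong widens the heuristic to cover more functions at a modest additional size cost.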
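
Several of the __switch_to hunks above (ARM, MIPS and Xtensa, all guarded by CONFIG_STACKPROTECTOR && !CONFIG_SMP) do in assembly what arch/sh/kernel/process_32.c does in C: on a uniprocessor build there is a single global __stack_chk_guard, so the incoming task's per-task canary has to be copied into it on every context switch. A simplified sketch, with task_struct cut down to the one field the example needs:

    /* Trimmed-down illustration; the real task_struct lives in
     * include/linux/sched.h and carries far more state. */
    struct task_struct {
            unsigned long stack_canary;     /* set via get_random_canary()
                                             * in dup_task_struct(),
                                             * see the kernel/fork.c hunk */
            /* ... */
    };

    extern unsigned long __stack_chk_guard;

    void switch_canary(struct task_struct *next)
    {
    #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
            /* One shared guard word on UP: adopt the next task's canary so
             * the compiler-emitted epilogue checks in its code compare
             * against the right value. */
            __stack_chk_guard = next->stack_canary;
    #endif
    }

On SMP these architectures leave the compiler-visible guard at its boot-time value for every task, since one global word cannot hold a different canary per CPU; x86 sidesteps this by keeping the guard in per-CPU data and refreshing it in __switch_to_asm, as the entry_32.S and entry_64.S hunks show.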