Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/entry-armv.S   24
-rw-r--r--  arch/arm/kernel/iwmmxt.S        2
-rw-r--r--  arch/arm/kernel/process.c      24
-rw-r--r--  arch/arm/kernel/signal.c       21
4 files changed, 41 insertions, 30 deletions
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index ab8e600..86c9252 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -20,6 +20,7 @@
 #include <asm/glue.h>
 #include <asm/vfpmacros.h>
 #include <asm/arch/entry-macro.S>
+#include <asm/thread_notify.h>
 
 #include "entry-header.S"
 
@@ -560,10 +561,8 @@ ENTRY(__switch_to)
 	add	ip, r1, #TI_CPU_SAVE
 	ldr	r3, [r2, #TI_TP_VALUE]
 	stmia	ip!, {r4 - sl, fp, sp, lr}	@ Store most regs on stack
-#ifndef CONFIG_MMU
-	add	r2, r2, #TI_CPU_DOMAIN
-#else
-	ldr	r6, [r2, #TI_CPU_DOMAIN]!
+#ifdef CONFIG_MMU
+	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 #if __LINUX_ARM_ARCH__ >= 6
 #ifdef CONFIG_CPU_32v6K
@@ -585,21 +584,20 @@ ENTRY(__switch_to)
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
 #endif
-#ifdef CONFIG_VFP
-	@ Always disable VFP so we can lazily save/restore the old
-	@ state. This occurs in the context of the previous thread.
-	VFPFMRX	r4, FPEXC
-	bic	r4, r4, #FPEXC_ENABLE
-	VFPFMXR	FPEXC, r4
-#endif
 #if defined(CONFIG_IWMMXT)
 	bl	iwmmxt_task_switch
 #elif defined(CONFIG_CPU_XSCALE)
-	add	r4, r2, #40			@ cpu_context_save->extra
+	add	r4, r2, #TI_CPU_DOMAIN + 40	@ cpu_context_save->extra
 	ldmib	r4, {r4, r5}
 	mar	acc0, r4, r5
 #endif
-	ldmib	r2, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
+	mov	r5, r0
+	add	r4, r2, #TI_CPU_SAVE
+	ldr	r0, =thread_notify_head
+	mov	r1, #THREAD_NOTIFY_SWITCH
+	bl	atomic_notifier_call_chain
+	mov	r0, r5
+	ldmia	r4, {r4 - sl, fp, sp, pc}	@ Load all regs saved previously
 
 	__INIT
 
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 24c7b04..af9e0ae 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -285,7 +285,7 @@ ENTRY(iwmmxt_task_switch)
 	bne	1f				@ yes: block them for next task
 
 	ldr	r5, =concan_owner
-	add	r6, r2, #(TI_IWMMXT_STATE - TI_CPU_DOMAIN) @ get next task Concan save area
+	add	r6, r2, #TI_IWMMXT_STATE	@ get next task Concan save area
 	ldr	r5, [r5]			@ get current Concan owner
 	teq	r5, r6				@ next task owns it?
 	movne	pc, lr				@ no: leave Concan disabled
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 17c38db..e1c77ee 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -33,6 +33,7 @@
 #include <asm/leds.h>
 #include <asm/processor.h>
 #include <asm/system.h>
+#include <asm/thread_notify.h>
 #include <asm/uaccess.h>
 #include <asm/mach/time.h>
 
@@ -338,13 +339,9 @@ void exit_thread(void)
 {
 }
 
-static void default_fp_init(union fp_state *fp)
-{
-	memset(fp, 0, sizeof(union fp_state));
-}
+ATOMIC_NOTIFIER_HEAD(thread_notify_head);
 
-void (*fp_init)(union fp_state *) = default_fp_init;
-EXPORT_SYMBOL(fp_init);
+EXPORT_SYMBOL_GPL(thread_notify_head);
 
 void flush_thread(void)
 {
@@ -353,22 +350,21 @@ void flush_thread(void)
 
 	memset(thread->used_cp, 0, sizeof(thread->used_cp));
 	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
+	memset(&thread->fpstate, 0, sizeof(union fp_state));
+
+	thread_notify(THREAD_NOTIFY_FLUSH, thread);
 #if defined(CONFIG_IWMMXT)
 	iwmmxt_task_release(thread);
 #endif
-	fp_init(&thread->fpstate);
-#if defined(CONFIG_VFP)
-	vfp_flush_thread(&thread->vfpstate);
-#endif
 }
 
 void release_thread(struct task_struct *dead_task)
 {
-#if defined(CONFIG_VFP)
-	vfp_release_thread(&task_thread_info(dead_task)->vfpstate);
-#endif
+	struct thread_info *thread = task_thread_info(dead_task);
+
+	thread_notify(THREAD_NOTIFY_RELEASE, thread);
 #if defined(CONFIG_IWMMXT)
-	iwmmxt_task_release(task_thread_info(dead_task));
+	iwmmxt_task_release(thread);
 #endif
 }
 
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a0cd0a9..e9fe780 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -665,17 +665,33 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 	if (syscall) {
 		if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
 			if (thumb_mode(regs)) {
-				regs->ARM_r7 = __NR_restart_syscall;
+				regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
 				regs->ARM_pc -= 2;
 			} else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+				regs->ARM_r7 = __NR_restart_syscall;
+				regs->ARM_pc -= 4;
+#else
 				u32 __user *usp;
+				u32 swival = __NR_restart_syscall;
 
 				regs->ARM_sp -= 12;
 				usp = (u32 __user *)regs->ARM_sp;
 
+				/*
+				 * Either we support OABI only, or we have
+				 * EABI with the OABI compat layer enabled.
+				 * In the latter case we don't know if user
+				 * space is EABI or not, and if not we must
+				 * not clobber r7.  Always using the OABI
+				 * syscall solves that issue and works for
+				 * all those cases.
+				 */
+				swival = swival - __NR_SYSCALL_BASE + __NR_OABI_SYSCALL_BASE;
+
 				put_user(regs->ARM_pc, &usp[0]);
 				/* swi __NR_restart_syscall */
-				put_user(0xef000000 | __NR_restart_syscall, &usp[1]);
+				put_user(0xef000000 | swival, &usp[1]);
 				/* ldr	pc, [sp], #12 */
 				put_user(0xe49df00c, &usp[2]);
 
@@ -683,6 +699,7 @@ static int do_signal(sigset_t *oldset, struct pt_regs *regs, int syscall)
 						   (unsigned long)(usp + 3));
 
 				regs->ARM_pc = regs->ARM_sp + 4;
+#endif
 			}
 		}
 		if (regs->ARM_r0 == -ERESTARTNOHAND ||
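Note: the changes above replace the hard-wired fp_init/VFP hooks with a generic notifier chain. flush_thread(), release_thread() and __switch_to() now publish THREAD_NOTIFY_FLUSH, THREAD_NOTIFY_RELEASE and THREAD_NOTIFY_SWITCH events on thread_notify_head, so coprocessor support code can subscribe instead of being called directly (the iWMMXt calls are left in place in this diff). The sketch below shows how such a subscriber could look. It is illustrative only: the example_* names are hypothetical, and it assumes <asm/thread_notify.h> declares thread_notify_head and the THREAD_NOTIFY_* constants, as the new includes in this diff suggest.

/*
 * Illustrative consumer of the thread_notify_head chain added above.
 * The example_* names are hypothetical and not part of this diff.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <asm/thread_info.h>
#include <asm/thread_notify.h>

static int example_thread_event(struct notifier_block *self,
				unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	pr_debug("thread notify event %lu for thread_info %p\n", cmd, thread);

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/* exec(): reset any lazily managed coprocessor state */
		break;
	case THREAD_NOTIFY_RELEASE:
		/* thread is being destroyed: drop ownership of saved context */
		break;
	case THREAD_NOTIFY_SWITCH:
		/* context switch: 'thread' is the thread being switched in */
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block example_thread_notifier = {
	.notifier_call = example_thread_event,
};

static int __init example_thread_notifier_init(void)
{
	atomic_notifier_chain_register(&thread_notify_head,
				       &example_thread_notifier);
	return 0;
}
late_initcall(example_thread_notifier_init);

Registering through the atomic notifier API keeps __switch_to free of per-coprocessor compile-time hooks: the assembly path simply calls atomic_notifier_call_chain with THREAD_NOTIFY_SWITCH, and whatever is registered on thread_notify_head runs with the incoming thread's thread_info pointer.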