| author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-02 21:17:37 +0000 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-06-09 16:46:25 +1000 |
| commit | e821ea70f3b4873b50056a1e0f74befed1014c09 (patch) | |
| tree | 5c7808e9ab65af2577a05a79726fb211f673feb8 /arch/powerpc/kernel/vector.S | |
| parent | d3f6204a7d65030ba92bf43a278b3f3054353e0b (diff) | |
powerpc: Move VMX and VSX asm code to vector.S
Currently, load_up_altivec and giveup_altivec are duplicated
for 32-bit and 64-bit. This patch creates a common implementation
in vector.S, moving the code out of head_32.S, head_64.S and
misc_64.S, using the same macros we already use for our common
implementation of load_up_fpu.
I also moved the VSX code over to vector.S, though in that case
I didn't make it build on 32-bit (yet).
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
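
The sharing the commit message refers to is visible in the diff below: the assembler is written once against width-neutral macros (PPC_LL, PPC_STL, PPC_LCMPI, LOAD_REG_ADDRBASE/ADDROFF, toreal/fromreal) that expand differently on 32-bit and 64-bit. As a rough sketch of the idea, and not a verbatim copy of the headers, their definitions in asm/asm-compat.h and asm/ppc_asm.h of this era look approximately like this (the real headers wrap the mnemonics in stringify_in_c() so they work from both C and assembly, and define many more operations):

```c
/* Rough sketch of the 32/64-bit abstraction used by vector.S; simplified
 * from arch/powerpc/include/asm/asm-compat.h and asm/ppc_asm.h of this
 * era.  Illustration only, not the exact header contents.
 */
#ifdef __powerpc64__
/* longs and pointers are doublewords */
#define PPC_LL		ld	/* load a long/pointer */
#define PPC_STL		std	/* store a long/pointer */
#define PPC_LCMPI	cmpdi	/* compare immediate at pointer width */
/* no phys/virt conversion needed on this path on 64-bit: no-ops */
#define toreal(rd)
#define fromreal(rd)
/* 64-bit loads the full symbol address; the offset part is zero */
#define LOAD_REG_ADDRBASE(reg, name)	LOAD_REG_ADDR(reg, name)
#define ADDROFF(name)			0
#else
/* longs and pointers are words */
#define PPC_LL		lwz
#define PPC_STL		stw
#define PPC_LCMPI	cmpwi
/* 32-bit low-level code can run with translation off: convert pointers */
#define toreal(rd)	tophys(rd,rd)
#define fromreal(rd)	tovirt(rd,rd)
/* 32-bit splits a symbol into high-adjusted base + low 16-bit offset */
#define LOAD_REG_ADDRBASE(reg, name)	lis reg,name@ha
#define ADDROFF(name)			name@l
#endif
```

With these in place, a single sequence such as `PPC_LL r4,ADDROFF(last_task_used_altivec)(r3)` assembles to the correct word- or doubleword-sized access on either kernel, which is what lets the duplicated head_32.S/head_64.S copies collapse into one file.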
Diffstat (limited to 'arch/powerpc/kernel/vector.S')
| -rw-r--r-- | arch/powerpc/kernel/vector.S | 210 |
1 file changed, 210 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 49ac3d6..ef36cbbc 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,5 +1,215 @@
+#include <asm/processor.h>
 #include <asm/ppc_asm.h>
 #include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ */
+_GLOBAL(load_up_altivec)
+	mfmsr	r5			/* grab the current MSR */
+	oris	r5,r5,MSR_VEC@h
+	MTMSRD(r5)			/* enable use of AltiVec now */
+	isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another.  Instead we call giveup_altvec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
+	toreal(r3)
+	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
+	PPC_LCMPI	0,r4,0
+	beq	1f
+
+	/* Save VMX state to last_task_used_altivec's THREAD struct */
+	toreal(r4)
+	addi	r4,r4,THREAD
+	SAVE_32VRS(0,r5,r4)
+	mfvscr	vr0
+	li	r10,THREAD_VSCR
+	stvx	vr0,r10,r4
+	/* Disable VMX for last_task_used_altivec */
+	PPC_LL	r5,PT_REGS(r4)
+	toreal(r5)
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r10,MSR_VEC@h
+	andc	r4,r4,r10
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+
+	/* Hack: if we get an altivec unavailable trap with VRSAVE
+	 * set to all zeros, we assume this is a broken application
+	 * that fails to set it properly, and thus we switch it to
+	 * all 1's
+	 */
+	mfspr	r4,SPRN_VRSAVE
+	cmpdi	0,r4,0
+	bne+	1f
+	li	r4,-1
+	mtspr	SPRN_VRSAVE,r4
+1:
+	/* enable use of VMX after return */
+#ifdef CONFIG_PPC32
+	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
+	oris	r9,r9,MSR_VEC@h
+#else
+	ld	r4,PACACURRENT(r13)
+	addi	r5,r4,THREAD		/* Get THREAD */
+	oris	r12,r12,MSR_VEC@h
+	std	r12,_MSR(r1)
+#endif
+	li	r4,1
+	li	r10,THREAD_VSCR
+	stw	r4,THREAD_USED_VR(r5)
+	lvx	vr0,r10,r5
+	mtvscr	vr0
+	REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	subi	r4,r5,THREAD		/* Back to 'current' */
+	fromreal(r4)
+	PPC_STL	r4,ADDROFF(last_task_used_math)(r3)
+#endif /* CONFIG_SMP */
+	/* restore registers and return */
+	blr
+
+/*
+ * giveup_altivec(tsk)
+ * Disable VMX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ */
+_GLOBAL(giveup_altivec)
+	mfmsr	r5
+	oris	r5,r5,MSR_VEC@h
+	SYNC
+	MTMSRD(r5)			/* enable use of VMX now */
+	isync
+	PPC_LCMPI	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	PPC_LL	r5,PT_REGS(r3)
+	PPC_LCMPI	0,r5,0
+	SAVE_32VRS(0,r4,r3)
+	mfvscr	vr0
+	li	r4,THREAD_VSCR
+	stvx	vr0,r4,r3
+	beq	1f
+	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+	lis	r3,(MSR_VEC|MSR_VSX)@h
+FTR_SECTION_ELSE
+	lis	r3,MSR_VEC@h
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#else
+	lis	r3,MSR_VEC@h
+#endif
+	andc	r4,r4,r3		/* disable FP for previous task */
+	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
+	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+#ifdef CONFIG_VSX
+
+#ifdef CONFIG_PPC32
+#error This asm code isn't ready for 32-bit kernels
+#endif
+
+/*
+ * load_up_vsx(unused, unused, tsk)
+ * Disable VSX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Reuse the fp and vsx saves, but first check to see if they have
+ * been saved already.
+ */
+_GLOBAL(load_up_vsx)
+/* Load FP and VSX registers if they haven't been done yet */
+	andi.	r5,r12,MSR_FP
+	beql+	load_up_fpu		/* skip if already loaded */
+	andis.	r5,r12,MSR_VEC@h
+	beql+	load_up_altivec		/* skip if already loaded */
+
+#ifndef CONFIG_SMP
+	ld	r3,last_task_used_vsx@got(r2)
+	ld	r4,0(r3)
+	cmpdi	0,r4,0
+	beq	1f
+	/* Disable VSX for last_task_used_vsx */
+	addi	r4,r4,THREAD
+	ld	r5,PT_REGS(r4)
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r6,MSR_VSX@h
+	andc	r6,r4,r6
+	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+	ld	r4,PACACURRENT(r13)
+	addi	r4,r4,THREAD		/* Get THREAD */
+	li	r6,1
+	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
+	/* enable use of VSX after return */
+	oris	r12,r12,MSR_VSX@h
+	std	r12,_MSR(r1)
+#ifndef CONFIG_SMP
+	/* Update last_task_used_math to 'current' */
+	ld	r4,PACACURRENT(r13)
+	std	r4,0(r3)
+#endif /* CONFIG_SMP */
+	b	fast_exception_return
+
+/*
+ * __giveup_vsx(tsk)
+ * Disable VSX for the task given as the argument.
+ * Does NOT save vsx registers.
+ * Enables the VSX for use in the kernel on return.
+ */
+_GLOBAL(__giveup_vsx)
+	mfmsr	r5
+	oris	r5,r5,MSR_VSX@h
+	mtmsrd	r5			/* enable use of VSX now */
+	isync
+
+	cmpdi	0,r3,0
+	beqlr-				/* if no previous owner, done */
+	addi	r3,r3,THREAD		/* want THREAD of task */
+	ld	r5,PT_REGS(r3)
+	cmpdi	0,r5,0
+	beq	1f
+	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+	lis	r3,MSR_VSX@h
+	andc	r4,r4,r3		/* disable VSX for previous task */
+	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+	li	r5,0
+	ld	r4,last_task_used_vsx@got(r2)
+	std	r5,0(r4)
+#endif /* CONFIG_SMP */
+	blr
+
+#endif /* CONFIG_VSX */
+

 /*
  * The routines below are in assembler so we can closely control the
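
For a sense of how these entry points are used from C: generic powerpc code calls giveup_altivec() whenever a task's live VMX state must be flushed back into its thread_struct (before a context switch away, signal delivery, or a ptrace access). The sketch below is modeled on flush_altivec_to_thread() from arch/powerpc/kernel/process.c of the same era; it illustrates the calling convention, and is not necessarily the exact code in this tree:

```c
#include <linux/sched.h>

/* Implemented in vector.S above; saves VMX state into tsk->thread
 * and clears MSR_VEC in the task's saved MSR. */
extern void giveup_altivec(struct task_struct *tsk);

/* Sketch modeled on flush_altivec_to_thread() in
 * arch/powerpc/kernel/process.c; illustration only. */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* keep the task on this CPU while we look at its MSR */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			/* No lazy switching on SMP: the unit is given up
			 * at every context switch, so only 'current' can
			 * still own live VMX state here. */
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
```

The !SMP last_task_used_altivec bookkeeping in the diff is the lazy-switching optimisation the comments describe: on UP the vector state can stay live in the unit across context switches, and is only saved when another task takes a VMX-unavailable trap, while on SMP the state is always given up in switch_to().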