author     Paul Mackerras <paulus@samba.org>                  2013-09-10 20:21:10 +1000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2013-10-11 17:26:50 +1100
commit     18461960cbf50bf345ef0667d45d5f64de8fb893 (patch)
tree       58947fe30598814684f7e33424675e204316e8ef /arch/powerpc/kernel/vector.S
parent     de79f7b9f6f92ec1bd6f61fa1f20de60728a5b5e (diff)
powerpc: Provide for giveup_fpu/altivec to save state in alternate location
This provides a facility which is intended for use by KVM, where the
contents of the FP/VSX and VMX (Altivec) registers can be saved away
to somewhere other than the thread_struct when kernel code wants to
use floating point or VMX instructions.

This is done by providing a pointer in the thread_struct to indicate
where the state should be saved to. The giveup_fpu() and
giveup_altivec() functions test these pointers and save state to the
indicated location if they are non-NULL.
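
Sketched in C, the arrangement looks roughly like the snippet below. The
pointer field names (fp_save_area, vr_save_area) are inferred from the
THREAD_VRSAVEAREA offset used in the assembly further down, and the stand-in
types are only illustrative, so treat the exact identifiers as assumptions
rather than the literal kernel definitions.

/* Sketch only, not the literal thread_struct; minimal stand-in types keep
 * the snippet self-contained. */
struct thread_fp_state_sketch { unsigned long fpr[32 + 1]; };       /* FPRs + FPSCR */
struct thread_vr_state_sketch { unsigned char vr[(32 + 1) * 16]; }; /* VRs + VSCR */

struct thread_sketch {
	struct thread_fp_state_sketch	fp_state;	/* normal FP/VSX save area */
	struct thread_fp_state_sketch	*fp_save_area;	/* alternate area; NULL means use fp_state */
	struct thread_vr_state_sketch	vr_state;	/* normal VMX save area */
	struct thread_vr_state_sketch	*vr_save_area;	/* alternate area; NULL means use vr_state */
};

The point of the indirection is that KVM can aim these pointers at buffers it
controls, so that a giveup triggered while guest register state is live lands
that state where KVM wants it rather than in the thread_struct.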
Note that the MSR_FP/VEC bits in task->thread.regs->msr are still used
to indicate whether the CPU register state is live, even when an
alternate save location is being used.
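
As a small illustration of that point (assuming the usual kernel definitions
of task_struct, pt_regs and MSR_VEC; the helper name here is made up):

static inline bool task_vmx_is_live(struct task_struct *tsk)
{
	/* Liveness is still keyed off MSR_VEC in the task's saved MSR;
	 * only the destination of a later save can differ. */
	return tsk->thread.regs && (tsk->thread.regs->msr & MSR_VEC);
}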
This also provides load_fp_state() and load_vr_state() functions, which
load up FP/VSX and VMX state from memory into the CPU registers, and
corresponding store_fp_state() and store_vr_state() functions, which
store FP/VSX and VMX state into memory from the CPU registers.
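
The assembly routines in the diff below take the buffer address in r3, so the
C declarations presumably look like the lines that follow; checkpoint_vmx() is
a purely hypothetical caller added for illustration, and the caller must keep
VMX enabled in the MSR around both calls.

/* Assumed signatures: each helper takes a pointer to the in-memory state. */
extern void load_fp_state(struct thread_fp_state *fp);
extern void store_fp_state(struct thread_fp_state *fp);
extern void load_vr_state(struct thread_vr_state *vr);
extern void store_vr_state(struct thread_vr_state *vr);

/* Hypothetical usage: park the live VMX contents in a private buffer,
 * use VMX for something else, then bring the original contents back. */
static void checkpoint_vmx(struct thread_vr_state *buf)
{
	store_vr_state(buf);	/* CPU registers -> memory, including VSCR */
	/* ... kernel use of VMX ... */
	load_vr_state(buf);	/* memory -> CPU registers, including VSCR */
}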
Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/vector.S')
 arch/powerpc/kernel/vector.S | 29 +++++++++++++++++++++++++++--
 1 file changed, 27 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index a48df87..eacda4e 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -37,6 +37,28 @@ _GLOBAL(do_load_up_transact_altivec)
 #endif
 
 /*
+ * Load state from memory into VMX registers including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(load_vr_state)
+	li	r4,VRSTATE_VSCR
+	lvx	vr0,r4,r3
+	mtvscr	vr0
+	REST_32VRS(0,r4,r3)
+	blr
+
+/*
+ * Store VMX state into memory, including VSCR.
+ * Assumes the caller has enabled VMX in the MSR.
+ */
+_GLOBAL(store_vr_state)
+	SAVE_32VRS(0, r4, r3)
+	mfvscr	vr0
+	li	r4, VRSTATE_VSCR
+	stvx	vr0, r4, r3
+	blr
+
+/*
  * Disable VMX for the task which had it previously,
  * and save its vector registers in its thread_struct.
  * Enables the VMX for use in the kernel on return.
@@ -144,9 +166,12 @@ _GLOBAL(giveup_altivec)
 	PPC_LCMPI	0,r3,0
 	beqlr			/* if no previous owner, done */
 	addi	r3,r3,THREAD	/* want THREAD of task */
-	addi	r7,r3,THREAD_VRSTATE
+	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
 	PPC_LL	r5,PT_REGS(r3)
-	PPC_LCMPI	0,r5,0
+	PPC_LCMPI	0,r7,0
+	bne	2f
+	addi	r7,r3,THREAD_VRSTATE
+2:	PPC_LCMPI	0,r5,0
 	SAVE_32VRS(0,r4,r7)
 	mfvscr	vr0
 	li	r4,VRSTATE_VSCR
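
For readers less used to the macros, here is a C-level paraphrase of the
changed giveup_altivec() hunk, reusing the sketch types from above and
assuming THREAD_VRSAVEAREA is the offset of the new save-area pointer:

/* Reading aid only: the branch structure of the hunk, not kernel code.
 *   PPC_LL    r7,THREAD_VRSAVEAREA(r3)   dst = thread->vr_save_area
 *   PPC_LCMPI 0,r7,0 ; bne 2f            if it is NULL ...
 *   addi      r7,r3,THREAD_VRSTATE       ... fall back to &thread->vr_state
 * after which SAVE_32VRS/mfvscr/stvx write VR0-VR31 and VSCR through r7. */
static struct thread_vr_state_sketch *vr_save_destination(struct thread_sketch *thread)
{
	struct thread_vr_state_sketch *dst = thread->vr_save_area;

	if (!dst)
		dst = &thread->vr_state;
	return dst;
}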