author     Cyril Bur <cyrilbur@gmail.com>         2016-02-29 17:53:48 +1100
committer  Michael Ellerman <mpe@ellerman.id.au>  2016-03-02 23:34:48 +1100
commit     de2a20aa7237b45d3c14a2505804a8daa95a8f53 (patch)
tree       13593cbcaa3c487fa1a8a9ce76c1bdc5e50b602f /arch/powerpc
parent     70fe3d980f5f14d8125869125ba9a0ea95e09c6b (diff)
powerpc: Prepare for splitting giveup_{fpu, altivec, vsx} in two
This prepares for decoupling the saving of {fpu,altivec,vsx} registers from marking {fpu,altivec,vsx} as unused by a thread.

Currently giveup_{fpu,altivec,vsx}() does both; however, optimisations to task switching can be made if these two operations are decoupled. save_all() will permit saving registers to the thread structs while leaving the thread's MSR bits enabled.

This patch introduces no functional change.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
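As a rough illustration of the split described above, here is a minimal userspace C sketch (not kernel code; the struct, function names and bit values are invented for this sketch and are not the kernel's): a giveup-style helper copies the live register state into the thread struct and clears the facility bit, so the thread must fault to regain the facility, while a save-style helper only copies the state and leaves the bit set.

/* Userspace model of the split described in the commit message -- not kernel code. */
#include <stdio.h>

/* Illustrative facility bits -- placeholder values, not the real MSR layout. */
#define MSR_FP  (1u << 0)
#define MSR_VEC (1u << 1)

struct thread_state {
        unsigned int msr;   /* facility-enable bits carried in the thread's user MSR */
        double fp_regs;     /* stand-in for saved floating point register state */
};

/* giveup-style: copy the live state into the thread struct AND clear the
 * facility bit, so the thread takes a fault before using the FPU again. */
static void giveup_fpu(struct thread_state *t, double live_fp)
{
        t->fp_regs = live_fp;
        t->msr &= ~MSR_FP;
}

/* save-style (what save_all() is preparing for): copy the live state but
 * leave the facility bit set, so the thread keeps its access to the FPU. */
static void save_fpu(struct thread_state *t, double live_fp)
{
        t->fp_regs = live_fp;
}

int main(void)
{
        struct thread_state a = { .msr = MSR_FP | MSR_VEC };
        struct thread_state b = { .msr = MSR_FP | MSR_VEC };

        giveup_fpu(&a, 1.0);
        save_fpu(&b, 2.0);

        printf("giveup_fpu: MSR_FP %s\n", (a.msr & MSR_FP) ? "still set" : "cleared");
        printf("save_fpu:   MSR_FP %s\n", (b.msr & MSR_FP) ? "still set" : "cleared");
        return 0;
}

In the diff below, save_all() is that save-style entry point: for now it still calls the existing __giveup_*() helpers, and flush_all_to_thread() is switched from giveup_all() to save_all(), which is why the patch on its own introduces no functional change.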
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/reg.h        |  8
-rw-r--r--  arch/powerpc/include/asm/switch_to.h  |  7
-rw-r--r--  arch/powerpc/kernel/process.c         | 31
3 files changed, 45 insertions, 1 deletion
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 11a81bd..52ed654 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -75,6 +75,14 @@
#define MSR_HV 0
#endif
+/*
+ * To be used in shared book E/book S, this avoids needing to worry about
+ * book S/book E in shared code
+ */
+#ifndef MSR_SPE
+#define MSR_SPE 0
+#endif
+
#define MSR_VEC __MASK(MSR_VEC_LG) /* Enable AltiVec */
#define MSR_VSX __MASK(MSR_VSX_LG) /* Enable VSX */
#define MSR_POW __MASK(MSR_POW_LG) /* Enable Power Management */
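The reg.h hunk above defines MSR_SPE as 0 on platforms that lack it, so shared code can test the bit unconditionally, as save_all() does later in this patch with its check of usermsr & MSR_SPE. A minimal standalone sketch of that idiom follows (HAVE_SPE stands in for a config option such as CONFIG_SPE, and the bit value is illustrative only):

#include <stdio.h>

/* Pretend configuration switch -- invented for this sketch. */
/* #define HAVE_SPE */

#ifdef HAVE_SPE
#define MSR_SPE (1u << 25)   /* illustrative value only */
#else
#define MSR_SPE 0            /* facility absent: the bit can never test true */
#endif

static void save_spe_state(void)
{
        puts("saving SPE state");
}

int main(void)
{
        unsigned int usermsr = ~0u;   /* pretend every available facility bit is set */

        /* No #ifdef needed at the call site: with MSR_SPE defined as 0 this
         * condition is constant-false and the compiler drops the call. */
        if (usermsr & MSR_SPE)
                save_spe_state();

        printf("MSR_SPE = %#x\n", MSR_SPE);
        return 0;
}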
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 5b268b6..3690041 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -34,6 +34,7 @@ static inline void disable_kernel_fp(void)
msr_check_and_clear(MSR_FP);
}
#else
+static inline void __giveup_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
#endif
@@ -46,6 +47,8 @@ static inline void disable_kernel_altivec(void)
{
msr_check_and_clear(MSR_VEC);
}
+#else
+static inline void __giveup_altivec(struct task_struct *t) { }
#endif
#ifdef CONFIG_VSX
@@ -57,6 +60,8 @@ static inline void disable_kernel_vsx(void)
{
msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
+#else
+static inline void __giveup_vsx(struct task_struct *t) { }
#endif
#ifdef CONFIG_SPE
@@ -68,6 +73,8 @@ static inline void disable_kernel_spe(void)
{
msr_check_and_clear(MSR_SPE);
}
+#else
+static inline void __giveup_spe(struct task_struct *t) { }
#endif
static inline void clear_task_ebb(struct task_struct *t)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 55c1eb0..29da07f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -444,12 +444,41 @@ void restore_math(struct pt_regs *regs)
regs->msr = msr;
}
+void save_all(struct task_struct *tsk)
+{
+ unsigned long usermsr;
+
+ if (!tsk->thread.regs)
+ return;
+
+ usermsr = tsk->thread.regs->msr;
+
+ if ((usermsr & msr_all_available) == 0)
+ return;
+
+ msr_check_and_set(msr_all_available);
+
+ if (usermsr & MSR_FP)
+ __giveup_fpu(tsk);
+
+ if (usermsr & MSR_VEC)
+ __giveup_altivec(tsk);
+
+ if (usermsr & MSR_VSX)
+ __giveup_vsx(tsk);
+
+ if (usermsr & MSR_SPE)
+ __giveup_spe(tsk);
+
+ msr_check_and_clear(msr_all_available);
+}
+
void flush_all_to_thread(struct task_struct *tsk)
{
if (tsk->thread.regs) {
preempt_disable();
BUG_ON(tsk != current);
- giveup_all(tsk);
+ save_all(tsk);
#ifdef CONFIG_SPE
if (tsk->thread.regs->msr & MSR_SPE)