 arch/mips/kernel/process.c | 40 +++++++++++++++++-----------------------
 1 file changed, 17 insertions(+), 23 deletions(-)
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index dcccc6d..a6b3dc5 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -576,11 +576,19 @@ int mips_get_process_fp_mode(struct task_struct *task)
 	return value;
 }
 
+static void prepare_for_fp_mode_switch(void *info)
+{
+	struct mm_struct *mm = info;
+
+	if (current->mm == mm)
+		lose_fpu(1);
+}
+
 int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 {
 	const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
-	unsigned long switch_count;
 	struct task_struct *t;
+	int max_users;
 
 	/* Check the value is valid */
 	if (value & ~known_bits)
@@ -609,31 +617,17 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	smp_mb__after_atomic();
 
 	/*
-	 * If there are multiple online CPUs then wait until all threads whose
-	 * FP mode is about to change have been context switched. This approach
-	 * allows us to only worry about whether an FP mode switch is in
-	 * progress when FP is first used in a tasks time slice. Pretty much all
-	 * of the mode switch overhead can thus be confined to cases where mode
-	 * switches are actually occurring. That is, to here. However for the
-	 * thread performing the mode switch it may take a while...
+	 * If there are multiple online CPUs then force any which are running
+	 * threads in this process to lose their FPU context, which they can't
+	 * regain until fp_mode_switching is cleared later.
 	 */
 	if (num_online_cpus() > 1) {
-		spin_lock_irq(&task->sighand->siglock);
-
-		for_each_thread(task, t) {
-			if (t == current)
-				continue;
-
-			switch_count = t->nvcsw + t->nivcsw;
-
-			do {
-				spin_unlock_irq(&task->sighand->siglock);
-				cond_resched();
-				spin_lock_irq(&task->sighand->siglock);
-			} while ((t->nvcsw + t->nivcsw) == switch_count);
-		}
+		/* No need to send an IPI for the local CPU */
+		max_users = (task->mm == current->mm) ? 1 : 0;
 
-		spin_unlock_irq(&task->sighand->siglock);
+		if (atomic_read(&current->mm->mm_users) > max_users)
+			smp_call_function(prepare_for_fp_mode_switch,
+					  (void *)current->mm, 1);
 	}
 
 	/*
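
For reference, the path above is driven from userspace through prctl(): PR_SET_FP_MODE hands the requested FR/FRE bits to mips_set_process_fp_mode(), and with this patch an SMP kernel IPIs every other CPU so that sibling threads drop their live FPU context before the mode actually changes. Below is a minimal userspace sketch of that trigger, not part of the patch, assuming a MIPS target and a kernel containing this change; the PR_* constants are the standard <linux/prctl.h> values, with fallback defines for older headers.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE 45
# define PR_GET_FP_MODE 46
# define PR_FP_MODE_FR  (1 << 0)	/* 64b FP registers */
# define PR_FP_MODE_FRE (1 << 1)	/* 32b compatibility */
#endif

int main(void)
{
	/* PR_GET_FP_MODE reports the current mode in the return value */
	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

	if (mode < 0) {
		perror("PR_GET_FP_MODE");
		return 1;
	}
	printf("FR=%d FRE=%d\n",
	       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));

	/*
	 * Request FR=1. On an SMP system this is the call that makes the
	 * kernel send the prepare_for_fp_mode_switch() IPI added above.
	 */
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) < 0) {
		perror("PR_SET_FP_MODE");
		return 1;
	}
	return 0;
}

Compared with the removed loop, which polled each sibling thread's context-switch counters under siglock, the smp_call_function() approach bounds the mode-switch latency at one synchronous IPI round trip to the remote CPUs.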