author     akpm@osdl.org <akpm@osdl.org>             2006-01-12 01:05:32 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-01-12 09:08:50 -0800
commit     d7102e95b7b9c00277562c29aad421d2d521c5f6 (patch)
tree       3ad3d94c329095962c6cd6dcea41e1ccf2db5a7e
parent     198e2f181163233b379dc7ce8a6d7516b84042e7 (diff)
[PATCH] sched: filter affine wakeups
From: Nick Piggin <nickpiggin@yahoo.com.au>
Track the last waker CPU, and only consider wakeup-balancing if there is a
match between the current waker CPU and the previous waker CPU. This ensures
that there is some correlation between two subsequent wakeup events before
we move the task. It should help random-wakeup workloads on large SMP
systems by reducing migration attempts by a factor of nr_cpus.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
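
The filter itself is just a per-task CPU comparison in the wakeup path: an
affine-wakeup attempt is only considered when the CPU doing the wakeup matches
the CPU recorded at the previous wakeup, otherwise the task's last_waker_cpu
is simply updated. Below is a minimal, self-contained userspace C sketch of
that idea; the names fake_task and should_try_affine_wakeup are illustrative
only and do not appear in the kernel patch.

/*
 * Sketch of the affine-wakeup filter: balancing is only attempted
 * when two consecutive wakeups of the same task come from the same CPU.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
	int last_waker_cpu;	/* CPU that last woke this task up */
};

static bool should_try_affine_wakeup(struct fake_task *p, int this_cpu)
{
	if (p->last_waker_cpu != this_cpu) {
		/* No correlation with the previous waker: skip balancing. */
		p->last_waker_cpu = this_cpu;
		return false;
	}
	return true;
}

int main(void)
{
	struct fake_task t = { .last_waker_cpu = 2 };

	/* Wakeup from CPU 3: waker changed, no affine attempt (prints 0). */
	printf("%d\n", should_try_affine_wakeup(&t, 3));

	/* Second wakeup from CPU 3: wakeups correlate, try it (prints 1). */
	printf("%d\n", should_try_affine_wakeup(&t, 3));
	return 0;
}

With this check, a one-off wakeup from an unrelated CPU merely records the new
waker; only a repeated waker on the same CPU reaches the more expensive
wakeup-balancing logic, which is what cuts migration attempts on large SMP
machines.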
-rw-r--r--  include/linux/sched.h |  5
-rw-r--r--  kernel/sched.c        | 10
2 files changed, 13 insertions, 2 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5d6b922..b5ef92a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -696,9 +696,12 @@ struct task_struct {

 	int lock_depth;		/* BKL lock depth */

-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+	int last_waker_cpu;	/* CPU that last woke this task up */
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	int oncpu;
 #endif
+#endif
 	int prio, static_prio;
 	struct list_head run_list;
 	prio_array_t *array;
diff --git a/kernel/sched.c b/kernel/sched.c
index 98461de..c9dec2a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1290,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 		}
 	}

+	if (p->last_waker_cpu != this_cpu)
+		goto out_set_cpu;
+
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
 		goto out_set_cpu;

@@ -1360,6 +1363,8 @@ out_set_cpu:
 		cpu = task_cpu(p);
 	}

+	p->last_waker_cpu = this_cpu;
+
 out_activate:
 #endif /* CONFIG_SMP */
 	if (old_state == TASK_UNINTERRUPTIBLE) {
@@ -1441,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+	p->last_waker_cpu = cpu;
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
 #endif
+#endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;