| author | Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> | 2012-05-24 19:46:41 +0530 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2012-07-24 13:53:18 +0200 |
| commit | 80d1fa6463d934969b7aebf04107fc133463f0f6 | |
| tree | 9e86e50c41016e68df655890c93a3b200c6a3ec0 | |
| parent | d35be8bab9b0ce44bed4b9453f86ebf64062721e | |
cpusets, hotplug: Implement cpuset tree traversal in a helper function
At present, the functions that deal with cpusets during CPU/memory hotplug
are quite messy: a lot of the functionality is mixed together without clear
separation, and this also gets in the way of optimization. For example,
cpuset_update_active_cpus() is called on both CPU offline and CPU online
events, yet it invokes scan_for_empty_cpusets(), which makes sense only for
CPU offline events. As a result, the current code ends up unnecessarily
traversing the cpuset tree on CPU online as well.
As a first step towards cleaning up those functions, encapsulate the cpuset
tree traversal in a helper function, so as to facilitate upcoming changes.
Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20120524141635.3692.893.stgit@srivatsabhat.in.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | kernel/cpuset.c | 36 |
1 file changed, 27 insertions, 9 deletions
```diff
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 746d1ee..ba96349 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1990,6 +1990,32 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 }
 
 /*
+ * Helper function to traverse cpusets.
+ * It can be used to walk the cpuset tree from top to bottom, completing
+ * one layer before dropping down to the next (thus always processing a
+ * node before any of its children).
+ */
+static struct cpuset *cpuset_next(struct list_head *queue)
+{
+	struct cpuset *cp;
+	struct cpuset *child;	/* scans child cpusets of cp */
+	struct cgroup *cont;
+
+	if (list_empty(queue))
+		return NULL;
+
+	cp = list_first_entry(queue, struct cpuset, stack_list);
+	list_del(queue->next);
+	list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+		child = cgroup_cs(cont);
+		list_add_tail(&child->stack_list, queue);
+	}
+
+	return cp;
+}
+
+
+/*
  * Walk the specified cpuset subtree and look for empty cpusets.
  * The tasks of such cpuset must be moved to a parent cpuset.
  *
@@ -2008,19 +2034,11 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 {
 	LIST_HEAD(queue);
 	struct cpuset *cp;	/* scans cpusets being updated */
-	struct cpuset *child;	/* scans child cpusets of cp */
-	struct cgroup *cont;
 	static nodemask_t oldmems;	/* protected by cgroup_mutex */
 
 	list_add_tail((struct list_head *)&root->stack_list, &queue);
 
-	while (!list_empty(&queue)) {
-		cp = list_first_entry(&queue, struct cpuset, stack_list);
-		list_del(queue.next);
-		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
-			child = cgroup_cs(cont);
-			list_add_tail(&child->stack_list, &queue);
-		}
+	while ((cp = cpuset_next(&queue)) != NULL) {
 
 		/* Continue past cpusets with all cpus, mems online */
 		if (cpumask_subset(cp->cpus_allowed, cpu_active_mask) &&
```
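To make the traversal pattern above easier to follow outside the kernel tree, here is a minimal, self-contained C sketch of the same queue-driven, top-down walk that cpuset_next() implements. The struct node type, its fields, the tree_next()/queue_add_tail() helpers, and the main() driver are all hypothetical stand-ins for struct cpuset, struct list_head, and the kernel list API; this is an illustration of the technique, not kernel code.

```c
/*
 * Standalone sketch (not kernel code): a queue-driven, top-down tree
 * traversal in the style of cpuset_next().  All names here are invented
 * for illustration.
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *children[4];	/* up to 4 children, NULL-terminated */
	struct node *queue_next;	/* link used while the node sits on the queue */
};

/* Append a node to the tail of the traversal queue. */
static void queue_add_tail(struct node **head, struct node *n)
{
	n->queue_next = NULL;
	while (*head)
		head = &(*head)->queue_next;
	*head = n;
}

/*
 * Pop the next node off the queue and enqueue its children, so the walk
 * visits every node before any of that node's children -- the same
 * contract the kernel helper provides.
 */
static struct node *tree_next(struct node **queue)
{
	struct node *n;
	int i;

	if (!*queue)
		return NULL;		/* traversal finished */

	n = *queue;
	*queue = n->queue_next;		/* dequeue the head */

	for (i = 0; i < 4 && n->children[i]; i++)
		queue_add_tail(queue, n->children[i]);

	return n;
}

int main(void)
{
	struct node leaf1 = { .name = "leaf1" };
	struct node leaf2 = { .name = "leaf2" };
	struct node child = { .name = "child", .children = { &leaf1, &leaf2 } };
	struct node root  = { .name = "root",  .children = { &child } };
	struct node *queue = NULL;
	struct node *n;

	queue_add_tail(&queue, &root);

	/* Mirrors the refactored caller: while ((cp = cpuset_next(&queue))) */
	while ((n = tree_next(&queue)) != NULL)
		printf("visiting %s\n", n->name);

	return 0;
}
```

The design point mirrors the commit: once the "dequeue the head, enqueue its children" step lives in a helper that returns the next node (or NULL when the queue is empty), the caller collapses to a single while loop, which is exactly what scan_for_empty_cpusets() is reduced to in the diff above.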