path: root/kernel/sched_fair.c
author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-12-17 17:45:42 +0100
committer	Ingo Molnar <mingo@elte.hu>	2010-01-21 13:40:11 +0100
commit		897c395f4c94ae19302f92393a0b8304e414ee06 (patch)
tree		6126d2e5ab28a78af2fa2ba1e397b326baa35f1a /kernel/sched_fair.c
parent		ee00e66ffff250fb0d3a789e5565462f71c7c9a7 (diff)
download	op-kernel-dev-897c395f4c94ae19302f92393a0b8304e414ee06.zip
		op-kernel-dev-897c395f4c94ae19302f92393a0b8304e414ee06.tar.gz
sched: Remove rq_iterator from move_one_task
Again, since we only iterate the fair class, remove the abstraction.

Since this is the last user of the rq_iterator, remove all that too.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
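In short, the patch drops the callback-style rq_iterator (a void *arg plus start()/next() function pointers) and lets move_one_task() walk each cfs_rq->tasks list directly with list_for_each_entry_safe(), as the hunks below show. A rough user-space sketch of the same refactoring is given here for illustration only; every name in it is invented for the sketch and none of it is kernel code:

/*
 * Illustrative C program: callback-based iteration (old rq_iterator style)
 * vs. direct list iteration (new move_one_task() style).
 */
#include <stdio.h>

/* Toy task list; stands in for cfs_rq->tasks. */
struct task { const char *name; int pinned; struct task *next; };

/* Old style: opaque iterator with start/next callbacks, like rq_iterator. */
struct iter {
	void *arg;
	struct task *(*start)(void *);
	struct task *(*next)(void *);
};

static struct task *list_start(void *arg) { return *(struct task **)arg; }

static struct task *list_next(void *arg)
{
	struct task **pos = arg;
	*pos = (*pos)->next;
	return *pos;
}

/* Find the first migratable task via the iterator indirection. */
static struct task *move_one_indirect(struct iter *it)
{
	for (struct task *p = it->start(it->arg); p; p = it->next(it->arg))
		if (!p->pinned)
			return p;
	return NULL;
}

/* New style: iterate the list directly, no callback indirection. */
static struct task *move_one_direct(struct task *head)
{
	for (struct task *p = head; p; p = p->next)
		if (!p->pinned)
			return p;
	return NULL;
}

int main(void)
{
	struct task c = { "C", 0, NULL }, b = { "B", 1, &c }, a = { "A", 1, &b };
	struct task *head = &a, *cursor = head;
	struct iter it = { &cursor, list_start, list_next };

	printf("indirect: %s\n", move_one_indirect(&it)->name);	/* C */
	printf("direct:   %s\n", move_one_direct(head)->name);		/* C */
	return 0;
}

Both helpers pick the same task; the direct loop just removes a layer of indirection, which is what the patch does for the fair class now that no other scheduling class uses the iterator.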
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--	kernel/sched_fair.c	146
1 file changed, 36 insertions(+), 110 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 709deb3..e48e459 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1814,54 +1814,6 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
* Fair scheduling class load-balancing methods:
*/
-/*
- * Load-balancing iterator. Note: while the runqueue stays locked
- * during the whole iteration, the current task might be
- * dequeued so the iterator has to be dequeue-safe. Here we
- * achieve that by always pre-iterating before returning
- * the current task:
- */
-static struct task_struct *
-__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
-{
- struct task_struct *p = NULL;
- struct sched_entity *se;
-
- if (next == &cfs_rq->tasks)
- return NULL;
-
- se = list_entry(next, struct sched_entity, group_node);
- p = task_of(se);
- cfs_rq->balance_iterator = next->next;
-
- return p;
-}
-
-static struct task_struct *load_balance_start_fair(void *arg)
-{
- struct cfs_rq *cfs_rq = arg;
-
- return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
-}
-
-static struct task_struct *load_balance_next_fair(void *arg)
-{
- struct cfs_rq *cfs_rq = arg;
-
- return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
-}
-
-/*
- * runqueue iterator, to support SMP load-balancing between different
- * scheduling classes, without having to expose their internal data
- * structures to the load-balancing proper:
- */
-struct rq_iterator {
- void *arg;
- struct task_struct *(*start)(void *);
- struct task_struct *(*next)(void *);
-};
-
static unsigned long
balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move, struct sched_domain *sd,
@@ -1929,42 +1881,6 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
}
#endif
-static int
-iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
- struct sched_domain *sd, enum cpu_idle_type idle,
- struct rq_iterator *iterator);
-
-/*
- * move_one_task tries to move exactly one task from busiest to this_rq, as
- * part of active balancing operations within "domain".
- * Returns 1 if successful and 0 otherwise.
- *
- * Called with both runqueues locked.
- */
-static int
-move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
- struct sched_domain *sd, enum cpu_idle_type idle)
-{
- struct cfs_rq *busy_cfs_rq;
- struct rq_iterator cfs_rq_iterator;
-
- cfs_rq_iterator.start = load_balance_start_fair;
- cfs_rq_iterator.next = load_balance_next_fair;
-
- for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
- /*
- * pass busy_cfs_rq argument into
- * load_balance_[start|next]_fair iterators
- */
- cfs_rq_iterator.arg = busy_cfs_rq;
- if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
- &cfs_rq_iterator))
- return 1;
- }
-
- return 0;
-}
-
/*
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
@@ -2029,6 +1945,42 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
return 1;
}
+/*
+ * move_one_task tries to move exactly one task from busiest to this_rq, as
+ * part of active balancing operations within "domain".
+ * Returns 1 if successful and 0 otherwise.
+ *
+ * Called with both runqueues locked.
+ */
+static int
+move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
+ struct sched_domain *sd, enum cpu_idle_type idle)
+{
+ struct task_struct *p, *n;
+ struct cfs_rq *cfs_rq;
+ int pinned = 0;
+
+ for_each_leaf_cfs_rq(busiest, cfs_rq) {
+ list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
+
+ if (!can_migrate_task(p, busiest, this_cpu,
+ sd, idle, &pinned))
+ continue;
+
+ pull_task(busiest, p, this_rq, this_cpu);
+ /*
+ * Right now, this is only the second place pull_task()
+ * is called, so we can safely collect pull_task()
+ * stats here rather than inside pull_task().
+ */
+ schedstat_inc(sd, lb_gained[idle]);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
static unsigned long
balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move, struct sched_domain *sd,
@@ -2126,32 +2078,6 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
return total_load_moved > 0;
}
-static int
-iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
- struct sched_domain *sd, enum cpu_idle_type idle,
- struct rq_iterator *iterator)
-{
- struct task_struct *p = iterator->start(iterator->arg);
- int pinned = 0;
-
- while (p) {
- if (can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
- pull_task(busiest, p, this_rq, this_cpu);
- /*
- * Right now, this is only the second place pull_task()
- * is called, so we can safely collect pull_task()
- * stats here rather than inside pull_task().
- */
- schedstat_inc(sd, lb_gained[idle]);
-
- return 1;
- }
- p = iterator->next(iterator->arg);
- }
-
- return 0;
-}
-
/********** Helpers for find_busiest_group ************************/
/*
* sd_lb_stats - Structure to store the statistics of a sched_domain