path: root/kernel/sched_fair.c
author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2008-04-19 19:45:00 +0200
committer  Ingo Molnar <mingo@elte.hu>               2008-04-19 19:45:00 +0200
commit     4a55bd5e97b1775913f88f11108a4f144f590e89 (patch)
tree       4514f2370d898b93086779c821023319fe4c8b9d /kernel/sched_fair.c
parent     ac884dec6d4a7df252150af875cffddf8f1d9c15 (diff)
download   op-kernel-dev-4a55bd5e97b1775913f88f11108a4f144f590e89.zip
           op-kernel-dev-4a55bd5e97b1775913f88f11108a4f144f590e89.tar.gz
sched: fair-group: de-couple load-balancing from the rb-trees
De-couple load-balancing from the rb-trees, so that I can change their organization.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
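For illustration, the following is a self-contained, user-space sketch (not kernel code) of the pattern this patch introduces: load balancing walks a plain circular list hung off the cfs_rq (cfs_rq->tasks, linked through se->group_node) and skips non-task entities, instead of walking the rb-tree with rb_next(). The struct layouts, the is_task flag, balance_iter() and the list helpers below are simplified stand-ins for entity_is_task(), __load_balance_iterator() and the kernel's <linux/list.h> primitives; they are assumptions made for the sketch, not code from the patch.

/*
 * Stand-alone sketch of the list-based load-balance iterator.
 * All names here are simplified stand-ins for the kernel structures.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *head)
{
	head->next = head->prev = head;
}

/* The patch uses list_add() (head insertion); tail insertion here just
 * keeps the demo output in enqueue order. */
static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched_entity {
	int is_task;                 /* stand-in for entity_is_task()       */
	const char *name;
	struct list_head group_node; /* links the entity into cfs_rq->tasks */
};

struct cfs_rq {
	struct list_head tasks;             /* new per-cfs_rq entity list    */
	struct list_head *balance_iterator; /* replaces rb_load_balance_curr */
};

/* Mirrors the reworked __load_balance_iterator(): walk the list from
 * 'next', skip entities that are not tasks, remember where to resume. */
static struct sched_entity *
balance_iter(struct cfs_rq *cfs_rq, struct list_head *next)
{
	struct sched_entity *se;

	if (next == &cfs_rq->tasks)
		return NULL;

	do {
		se = container_of(next, struct sched_entity, group_node);
		next = next->next;
	} while (next != &cfs_rq->tasks && !se->is_task);

	if (next == &cfs_rq->tasks)
		return NULL;

	cfs_rq->balance_iterator = next;
	return se->is_task ? se : NULL;
}

int main(void)
{
	struct sched_entity ents[] = {
		{ 1, "task-a" }, { 1, "task-b" }, { 0, "group-se" },
	};
	struct cfs_rq rq;
	struct sched_entity *se;
	int i;

	list_init(&rq.tasks);
	for (i = 0; i < 3; i++)            /* as account_entity_enqueue() does */
		list_add_tail(&ents[i].group_node, &rq.tasks);

	/* start/next pair, used the way load_balance_start_fair() and
	 * load_balance_next_fair() are driven by the load balancer */
	for (se = balance_iter(&rq, rq.tasks.next); se;
	     se = balance_iter(&rq, rq.balance_iterator))
		printf("candidate for migration: %s\n", se->name);

	return 0;
}

Walking cfs_rq->tasks this way leaves the rb-tree purely as the vruntime-ordered pick-next structure, which is what allows its organization to change independently of the load-balancing path.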
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--   kernel/sched_fair.c   21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9e301a2..ed8ce32 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -533,6 +533,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
add_cfs_task_weight(cfs_rq, se->load.weight);
cfs_rq->nr_running++;
se->on_rq = 1;
+ list_add(&se->group_node, &cfs_rq->tasks);
}
static void
@@ -545,6 +546,7 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
add_cfs_task_weight(cfs_rq, -se->load.weight);
cfs_rq->nr_running--;
se->on_rq = 0;
+ list_del_init(&se->group_node);
}
static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1289,21 +1291,24 @@ static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
* the current task:
*/
static struct task_struct *
-__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
+__load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
{
struct task_struct *p = NULL;
struct sched_entity *se;
- if (!curr)
+ if (next == &cfs_rq->tasks)
return NULL;
/* Skip over entities that are not tasks */
do {
- se = rb_entry(curr, struct sched_entity, run_node);
- curr = rb_next(curr);
- } while (curr && !entity_is_task(se));
+ se = list_entry(next, struct sched_entity, group_node);
+ next = next->next;
+ } while (next != &cfs_rq->tasks && !entity_is_task(se));
- cfs_rq->rb_load_balance_curr = curr;
+ if (next == &cfs_rq->tasks)
+ return NULL;
+
+ cfs_rq->balance_iterator = next;
if (entity_is_task(se))
p = task_of(se);
@@ -1315,14 +1320,14 @@ static struct task_struct *load_balance_start_fair(void *arg)
{
struct cfs_rq *cfs_rq = arg;
- return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
+ return __load_balance_iterator(cfs_rq, cfs_rq->tasks.next);
}
static struct task_struct *load_balance_next_fair(void *arg)
{
struct cfs_rq *cfs_rq = arg;
- return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
+ return __load_balance_iterator(cfs_rq, cfs_rq->balance_iterator);
}
static unsigned long