author     Gautham R Shenoy <ego@in.ibm.com>    2009-03-25 14:44:06 +0530
committer  Ingo Molnar <mingo@elte.hu>          2009-03-25 10:30:47 +0100
commit     2e6f44aeda426054fc58464df1ad571aecca0c92 (patch)
tree       eb6f6788ac6f0ac6c71e82f7741ed8f830c3b2b4 /kernel/sched.c
parent     37abe198b1246ddd206319c43502a687db62d347 (diff)
sched: Create helper to calculate small_imbalance in fbg()
Impact: cleanup

We have two places in find_busiest_group() where we need to calculate
the minor imbalance before returning the busiest group. Encapsulate this
functionality into a separate helper function.

Credit: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Balbir Singh" <balbir@in.ibm.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
LKML-Reference: <20090325091406.13992.54316.stgit@sofia.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
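For readers skimming the change, the following is a condensed, compilable sketch of the resulting call structure. The _stub types, the placeholder math inside the helper, and the load figures are all made up for illustration; the authoritative change is the diff below.

#include <stdio.h>

/* Hypothetical stand-in for the handful of sd_lb_stats fields involved. */
struct sd_lb_stats_stub {
	unsigned long max_load, avg_load, busiest_load_per_task;
};

/* Stand-in for the new fix_small_imbalance() helper. */
static void fix_small_imbalance_stub(struct sd_lb_stats_stub *sds,
				     unsigned long *imbalance)
{
	*imbalance = sds->busiest_load_per_task;	/* placeholder math */
}

static unsigned long find_busiest_group_stub(struct sd_lb_stats_stub *sds)
{
	unsigned long imbalance;

	if (sds->max_load < sds->avg_load) {
		imbalance = 0;
		fix_small_imbalance_stub(sds, &imbalance);	/* call site 1 */
		return imbalance;
	}

	imbalance = sds->max_load - sds->avg_load;	/* simplified */
	if (imbalance < sds->busiest_load_per_task)
		fix_small_imbalance_stub(sds, &imbalance);	/* call site 2 */

	return imbalance;
}

int main(void)
{
	struct sd_lb_stats_stub sds = { 900, 1000, 128 };	/* made-up loads */

	printf("imbalance = %lu\n", find_busiest_group_stub(&sds));
	return 0;
}

The duplicated small-imbalance block disappears; both branches simply invoke the helper.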
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 131
1 file changed, 70 insertions(+), 61 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index ec715f9..540147e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3484,6 +3484,71 @@ group_next:
} while (group != sd->groups);
}
+
+/**
+ * fix_small_imbalance - Calculate the minor imbalance that exists
+ * amongst the groups of a sched_domain, during
+ * load balancing.
+ * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
+ * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
+ * @imbalance: Variable to store the imbalance.
+ */
+static inline void fix_small_imbalance(struct sd_lb_stats *sds,
+ int this_cpu, unsigned long *imbalance)
+{
+ unsigned long tmp, pwr_now = 0, pwr_move = 0;
+ unsigned int imbn = 2;
+
+ if (sds->this_nr_running) {
+ sds->this_load_per_task /= sds->this_nr_running;
+ if (sds->busiest_load_per_task >
+ sds->this_load_per_task)
+ imbn = 1;
+ } else
+ sds->this_load_per_task =
+ cpu_avg_load_per_task(this_cpu);
+
+ if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
+ sds->busiest_load_per_task * imbn) {
+ *imbalance = sds->busiest_load_per_task;
+ return;
+ }
+
+ /*
+ * OK, we don't have enough imbalance to justify moving tasks,
+ * however we may be able to increase total CPU power used by
+ * moving them.
+ */
+
+ pwr_now += sds->busiest->__cpu_power *
+ min(sds->busiest_load_per_task, sds->max_load);
+ pwr_now += sds->this->__cpu_power *
+ min(sds->this_load_per_task, sds->this_load);
+ pwr_now /= SCHED_LOAD_SCALE;
+
+ /* Amount of load we'd subtract */
+ tmp = sg_div_cpu_power(sds->busiest,
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ if (sds->max_load > tmp)
+ pwr_move += sds->busiest->__cpu_power *
+ min(sds->busiest_load_per_task, sds->max_load - tmp);
+
+ /* Amount of load we'd add */
+ if (sds->max_load * sds->busiest->__cpu_power <
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE)
+ tmp = sg_div_cpu_power(sds->this,
+ sds->max_load * sds->busiest->__cpu_power);
+ else
+ tmp = sg_div_cpu_power(sds->this,
+ sds->busiest_load_per_task * SCHED_LOAD_SCALE);
+ pwr_move += sds->this->__cpu_power *
+ min(sds->this_load_per_task, sds->this_load + tmp);
+ pwr_move /= SCHED_LOAD_SCALE;
+
+ /* Move if we gain throughput */
+ if (pwr_move > pwr_now)
+ *imbalance = sds->busiest_load_per_task;
+}
/******* find_busiest_group() helpers end here *********************/
/*
@@ -3547,7 +3612,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
*/
if (sds.max_load < sds.avg_load) {
*imbalance = 0;
- goto small_imbalance;
+ fix_small_imbalance(&sds, this_cpu, imbalance);
+ goto ret_busiest;
}
/* Don't want to pull so many tasks that a group would go idle */
@@ -3565,67 +3631,10 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* a think about bumping its value to force at least one task to be
* moved
*/
- if (*imbalance < sds.busiest_load_per_task) {
- unsigned long tmp, pwr_now, pwr_move;
- unsigned int imbn;
-
-small_imbalance:
- pwr_move = pwr_now = 0;
- imbn = 2;
- if (sds.this_nr_running) {
- sds.this_load_per_task /= sds.this_nr_running;
- if (sds.busiest_load_per_task >
- sds.this_load_per_task)
- imbn = 1;
- } else
- sds.this_load_per_task =
- cpu_avg_load_per_task(this_cpu);
-
- if (sds.max_load - sds.this_load +
- sds.busiest_load_per_task >=
- sds.busiest_load_per_task * imbn) {
- *imbalance = sds.busiest_load_per_task;
- return sds.busiest;
- }
-
- /*
- * OK, we don't have enough imbalance to justify moving tasks,
- * however we may be able to increase total CPU power used by
- * moving them.
- */
-
- pwr_now += sds.busiest->__cpu_power *
- min(sds.busiest_load_per_task, sds.max_load);
- pwr_now += sds.this->__cpu_power *
- min(sds.this_load_per_task, sds.this_load);
- pwr_now /= SCHED_LOAD_SCALE;
-
- /* Amount of load we'd subtract */
- tmp = sg_div_cpu_power(sds.busiest,
- sds.busiest_load_per_task * SCHED_LOAD_SCALE);
- if (sds.max_load > tmp)
- pwr_move += sds.busiest->__cpu_power *
- min(sds.busiest_load_per_task,
- sds.max_load - tmp);
-
- /* Amount of load we'd add */
- if (sds.max_load * sds.busiest->__cpu_power <
- sds.busiest_load_per_task * SCHED_LOAD_SCALE)
- tmp = sg_div_cpu_power(sds.this,
- sds.max_load * sds.busiest->__cpu_power);
- else
- tmp = sg_div_cpu_power(sds.this,
- sds.busiest_load_per_task * SCHED_LOAD_SCALE);
- pwr_move += sds.this->__cpu_power *
- min(sds.this_load_per_task,
- sds.this_load + tmp);
- pwr_move /= SCHED_LOAD_SCALE;
-
- /* Move if we gain throughput */
- if (pwr_move > pwr_now)
- *imbalance = sds.busiest_load_per_task;
- }
+ if (*imbalance < sds.busiest_load_per_task)
+ fix_small_imbalance(&sds, this_cpu, imbalance);
+ret_busiest:
return sds.busiest;
out_balanced:
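As a supplement to the patch, here is a self-contained sketch of the arithmetic that fix_small_imbalance() performs, mirroring the steps visible in the hunk above. All names (lb_sketch, small_imbalance_sketch) and load figures are hypothetical, plain division stands in for sg_div_cpu_power() (assumed here to approximate load / __cpu_power), and SCHED_LOAD_SCALE is taken as 1024; this illustrates the heuristic, it is not kernel code.

#include <stdio.h>

#define SCHED_LOAD_SCALE 1024UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Flattened stand-in for the fields the helper reads from sd_lb_stats
 * and the two sched_groups.  Scenarios assume max_load >= this_load. */
struct lb_sketch {
	unsigned long max_load;			/* per-cpu load of busiest group */
	unsigned long this_load;		/* per-cpu load of this group    */
	unsigned long busiest_load_per_task;
	unsigned long this_load_sum;		/* summed task load of this group */
	unsigned long this_nr_running;
	unsigned long busiest_cpu_power;	/* __cpu_power of busiest group */
	unsigned long this_cpu_power;		/* __cpu_power of this group    */
};

static unsigned long small_imbalance_sketch(const struct lb_sketch *s)
{
	unsigned long this_load_per_task, tmp, pwr_now = 0, pwr_move = 0;
	unsigned long imbalance = 0;
	unsigned int imbn = 2;

	/* derive this group's load per task, as the helper does */
	this_load_per_task = s->this_nr_running ?
		s->this_load_sum / s->this_nr_running : 0;
	if (s->this_nr_running && s->busiest_load_per_task > this_load_per_task)
		imbn = 1;

	/* enough headroom: moving one whole task is justified */
	if (s->max_load - s->this_load + s->busiest_load_per_task >=
	    s->busiest_load_per_task * imbn)
		return s->busiest_load_per_task;

	/* otherwise compare CPU power used before and after a move */
	pwr_now += s->busiest_cpu_power * MIN(s->busiest_load_per_task, s->max_load);
	pwr_now += s->this_cpu_power * MIN(this_load_per_task, s->this_load);
	pwr_now /= SCHED_LOAD_SCALE;

	/* load the busiest group would shed */
	tmp = s->busiest_load_per_task * SCHED_LOAD_SCALE / s->busiest_cpu_power;
	if (s->max_load > tmp)
		pwr_move += s->busiest_cpu_power *
			MIN(s->busiest_load_per_task, s->max_load - tmp);

	/* load this group would pick up */
	if (s->max_load * s->busiest_cpu_power <
	    s->busiest_load_per_task * SCHED_LOAD_SCALE)
		tmp = s->max_load * s->busiest_cpu_power / s->this_cpu_power;
	else
		tmp = s->busiest_load_per_task * SCHED_LOAD_SCALE / s->this_cpu_power;
	pwr_move += s->this_cpu_power * MIN(this_load_per_task, s->this_load + tmp);
	pwr_move /= SCHED_LOAD_SCALE;

	/* move only if total useful work would go up */
	if (pwr_move > pwr_now)
		imbalance = s->busiest_load_per_task;

	printf("  pwr_now=%lu pwr_move=%lu\n", pwr_now, pwr_move);
	return imbalance;
}

int main(void)
{
	/* plenty of headroom: one task's worth of imbalance is reported */
	struct lb_sketch big_gap   = { 1200, 400, 512,  800, 2, 1024, 1024 };
	/* nearly balanced: falls through to the power comparison */
	struct lb_sketch small_gap = {  700, 600, 512, 1024, 2, 1024, 1024 };

	printf("big gap:   imbalance=%lu\n", small_imbalance_sketch(&big_gap));
	printf("small gap: imbalance=%lu\n", small_imbalance_sketch(&small_gap));
	return 0;
}

With these sample numbers the first scenario reports a one-task imbalance immediately, while the second computes pwr_now=1024 against pwr_move=700 and leaves the imbalance at zero, i.e. moving a task would not increase the total CPU power put to use.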