author    Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:02 +0200
committer Ingo Molnar <mingo@elte.hu>    2007-10-15 17:00:02 +0200
commit    38ad464d410dadceda1563f36bdb0be7fe4c8938 (patch)
tree      32d47a47e3d387792d6d8db6e8ee9ae3e93b8469 /kernel
parent    eba1ed4b7e52720e3099325874811c38a5ec1562 (diff)
sched: uniform tunings
use the same defaults on both UP and SMP.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mike Galbraith <efault@gmx.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c | 28
1 file changed, 0 insertions(+), 28 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 282d037..2520923 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4898,32 +4898,6 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
*/
cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
-/*
- * Increase the granularity value when there are more CPUs,
- * because with more CPUs the 'effective latency' as visible
- * to users decreases. But the relationship is not linear,
- * so pick a second-best guess by going with the log2 of the
- * number of CPUs.
- *
- * This idea comes from the SD scheduler of Con Kolivas:
- */
-static inline void sched_init_granularity(void)
-{
- unsigned int factor = 1 + ilog2(num_online_cpus());
- const unsigned long limit = 100000000;
-
- sysctl_sched_min_granularity *= factor;
- if (sysctl_sched_min_granularity > limit)
- sysctl_sched_min_granularity = limit;
-
- sysctl_sched_latency *= factor;
- if (sysctl_sched_latency > limit)
- sysctl_sched_latency = limit;
-
- sysctl_sched_runtime_limit = sysctl_sched_latency;
- sysctl_sched_wakeup_granularity = sysctl_sched_min_granularity / 2;
-}
-
#ifdef CONFIG_SMP
/*
* This is how migration works:
@@ -6491,12 +6465,10 @@ void __init sched_init_smp(void)
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
- sched_init_granularity();
}
#else
void __init sched_init_smp(void)
{
- sched_init_granularity();
}
#endif /* CONFIG_SMP */
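
For context on what this removal changes: the deleted sched_init_granularity() scaled the CFS tunables by a factor of 1 + ilog2(num_online_cpus()) and capped them at 100 ms, so an SMP boot ended up with larger granularity and latency defaults than UP. The stand-alone sketch below reproduces that arithmetic in user space; the 2 ms / 20 ms baselines and the ilog2_u() helper are illustrative assumptions, not values taken from this kernel tree.

/*
 * Stand-alone sketch (user space, illustrative only) of the scaling
 * that the removed sched_init_granularity() applied on SMP kernels.
 * The 2 ms / 20 ms baselines are assumed values, not taken from this tree.
 */
#include <stdio.h>

static unsigned int ilog2_u(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)		/* integer log2, mirrors the kernel's ilog2() */
		log++;
	return log;
}

int main(void)
{
	const unsigned long limit = 100000000UL;	/* 100 ms cap, in ns */
	unsigned int cpus;

	for (cpus = 1; cpus <= 64; cpus *= 4) {
		unsigned int factor = 1 + ilog2_u(cpus);
		unsigned long min_gran = 2000000UL * factor;	/* assumed 2 ms base */
		unsigned long latency  = 20000000UL * factor;	/* assumed 20 ms base */

		if (min_gran > limit)
			min_gran = limit;
		if (latency > limit)
			latency = limit;

		printf("%2u CPUs: factor=%u min_granularity=%lu ns latency=%lu ns\n",
		       cpus, factor, min_gran, latency);
	}
	return 0;
}

With this patch the per-CPU-count scaling is gone, so both UP and SMP boots simply keep the compile-time defaults of the sysctl_sched_* variables.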