author     Linus Torvalds <torvalds@linux-foundation.org>   2008-02-01 10:25:09 +1100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-02-01 10:25:09 +1100
commit     fbdde7bd274d74729954190f99afcb1e3d9bbfba (patch)
tree       a72dadd4a8b4957a4e21bf7ae38a763a71d6e274
parent     e1a9c9872dd004617555dff079b357a6ffd945e9 (diff)
parent     c4772d99300a9fc13c86aaa370e630c5973664f6 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched
* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  debug: turn ignore_loglevel into an early param
  sched: remove unused params
  sched: let +nice tasks have smaller impact
  sched: fix high wake up latencies with FAIR_USER_SCHED
  RCU: add help text for "RCU implementation type"
-rw-r--r--  init/Kconfig        |  8
-rw-r--r--  kernel/printk.c     |  4
-rw-r--r--  kernel/sched.c      | 10
-rw-r--r--  kernel/sched_fair.c |  8
4 files changed, 21 insertions, 9 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 0d0bbf2..dcc96a8 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -775,6 +775,14 @@ config PREEMPT_NOTIFIERS
choice
prompt "RCU implementation type:"
default CLASSIC_RCU
+ help
+ This allows you to choose either the classic RCU implementation
+ that is designed for best read-side performance on non-realtime
+ systems, or the preemptible RCU implementation for best latency
+ on realtime systems. Note that some kernel preemption modes
+ will restrict your choice.
+
+ Select the default if you are unsure.
config CLASSIC_RCU
bool "Classic RCU"
diff --git a/kernel/printk.c b/kernel/printk.c
index 58bbec6..29ae1e99 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -455,10 +455,10 @@ static int __init ignore_loglevel_setup(char *str)
ignore_loglevel = 1;
printk(KERN_INFO "debug: ignoring loglevel setting.\n");
- return 1;
+ return 0;
}
-__setup("ignore_loglevel", ignore_loglevel_setup);
+early_param("ignore_loglevel", ignore_loglevel_setup);
/*
* Write out chars from start to end - 1 inclusive
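A brief sketch of the convention the hunk above switches to, with a hypothetical "debug_foo" option (assumed name, not from this patch): early_param() handlers are run from parse_early_param() early in boot, before the normal __setup() handlers, and are expected to return 0 on success, whereas __setup() handlers return 1 to mark the option as consumed. That is why the handler's return value changes alongside the registration macro.

#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical early boot flag, illustrative only. */
static bool debug_foo __initdata;

static int __init debug_foo_setup(char *str)
{
	debug_foo = true;
	printk(KERN_INFO "debug: debug_foo enabled.\n");
	return 0;	/* 0 = success for early_param handlers */
}
early_param("debug_foo", debug_foo_setup);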
diff --git a/kernel/sched.c b/kernel/sched.c
index ba4c880..8355e00 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1255,12 +1255,12 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
#define sched_class_highest (&rt_sched_class)
-static void inc_nr_running(struct task_struct *p, struct rq *rq)
+static void inc_nr_running(struct rq *rq)
{
rq->nr_running++;
}
-static void dec_nr_running(struct task_struct *p, struct rq *rq)
+static void dec_nr_running(struct rq *rq)
{
rq->nr_running--;
}
@@ -1354,7 +1354,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
rq->nr_uninterruptible--;
enqueue_task(rq, p, wakeup);
- inc_nr_running(p, rq);
+ inc_nr_running(rq);
}
/*
@@ -1366,7 +1366,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
rq->nr_uninterruptible++;
dequeue_task(rq, p, sleep);
- dec_nr_running(p, rq);
+ dec_nr_running(rq);
}
/**
@@ -2006,7 +2006,7 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
* management (if any):
*/
p->sched_class->task_new(rq, p);
- inc_nr_running(p, rq);
+ inc_nr_running(rq);
}
check_preempt_curr(rq, p);
#ifdef CONFIG_SMP
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 72e25c7..6c091d6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -520,7 +520,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
if (!initial) {
/* sleeps upto a single latency don't count. */
- if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
+ if (sched_feat(NEW_FAIR_SLEEPERS))
vruntime -= sysctl_sched_latency;
/* ensure we never gain time by being placed backwards. */
@@ -1106,7 +1106,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
}
gran = sysctl_sched_wakeup_granularity;
- if (unlikely(se->load.weight != NICE_0_LOAD))
+ /*
+ * More easily preempt - nice tasks, while not making
+ * it harder for + nice tasks.
+ */
+ if (unlikely(se->load.weight > NICE_0_LOAD))
gran = calc_delta_fair(gran, &se->load);
if (pse->vruntime + gran < se->vruntime)
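To illustrate the effect of the hunk above (numbers assumed from the standard prio_to_weight[] table with NICE_0_LOAD == 1024, and a typical 10 ms wakeup granularity; a sketch, not kernel code): calc_delta_fair() scales the granularity as gran * NICE_0_LOAD / se->load.weight, so with the new "> NICE_0_LOAD" test the scaling only ever shrinks gran for a heavy (-nice) running task, instead of also inflating it when the running task is +nice.

#include <stdio.h>

/* Standalone model of the scaling done by calc_delta_fair(), assumed values. */
static unsigned long scaled_gran(unsigned long gran, unsigned long weight)
{
	unsigned long nice_0_load = 1024;

	if (weight > nice_0_load)			/* running task is "-nice" */
		return gran * nice_0_load / weight;	/* smaller gran: preempted more easily */
	return gran;					/* nice 0 and "+nice": left unchanged */
}

int main(void)
{
	unsigned long gran = 10000000;	/* ~10 ms in ns, assumed default granularity */

	/* nice -5 (weight ~3121): gran shrinks to roughly a third */
	printf("nice -5: %lu ns\n", scaled_gran(gran, 3121));
	/* nice +5 (weight ~335): the old "!=" test would have inflated gran ~3x */
	printf("nice +5: %lu ns\n", scaled_gran(gran, 335));
	return 0;
}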