author		Balbir Singh <balbir@linux.vnet.ibm.com>	2011-03-03 17:04:35 +0530
committer	Ingo Molnar <mingo@elte.hu>			2011-03-04 11:03:18 +0100
commit		0c3b9168017cbad2c4af3dd65ec93fe646eeaa62 (patch)
tree		98ae84f8060a62bc95ab3734b345fa87d0688a16
parent		b65a0e0c84cf489bfa00d6aa6c48abc5a237100f (diff)
sched: Fix sched rt group scheduling when hierarchy is enabled
The current sched rt code is broken when it comes to hierarchical
scheduling; this patch fixes two problems:

1. It adds redundant enqueuing (harmless) when it finds a queue
   has tasks enqueued, but it has no run time and it is not
   throttled.

2. The most important change is in sched_rt_rq_enqueue/dequeue.
   The code just picks the rt_rq belonging to the current cpu on
   which the period timer runs; the patch fixes it so that the
   correct rt_se is enqueued/dequeued.

Tested with a simple hierarchy /c/d, with c and d assigned similar
runtimes of 50,000 and a while-1 loop running within "d". Both c and
d get throttled; without the patch, the task just stops running and
never runs again (depending on where the sched_rt bandwidth timer
runs). With the patch, the task is throttled and runs as expected.

[ bharata: suggestions on how to pick the rt_se belonging to the
  rt_rq and the correct cpu ]

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: stable@kernel.org
LKML-Reference: <20110303113435.GA2868@balbir.in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/sched_rt.c	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ad62677..01f75a5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
 	struct sched_rt_entity *rt_se;
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
 		if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 
 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
-	int this_cpu = smp_processor_id();
 	struct sched_rt_entity *rt_se;
+	int cpu = cpu_of(rq_of_rt_rq(rt_rq));
 
-	rt_se = rt_rq->tg->rt_se[this_cpu];
+	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_se && on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
 			raw_spin_unlock(&rt_rq->rt_runtime_lock);
-		} else if (rt_rq->rt_nr_running)
+		} else if (rt_rq->rt_nr_running) {
 			idle = 0;
+			if (!rt_rq_throttled(rt_rq))
+				enqueue = 1;
+		}
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
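
As a reproduction aid, the throttling test described in the commit message
(hierarchy /c/d, both groups given an rt runtime of 50,000, a while-1 loop
inside "d") can be driven by an ordinary CPU-bound RT task. The sketch below
is only illustrative: the group names and the 50,000 value come from the
message above, and it assumes they refer to cpu.rt_runtime_us in microseconds;
the cgroup mount point, the use of chrt to make the task SCHED_FIFO, and the
program itself are assumptions about the test environment, not part of this
patch.

/*
 * busyloop.c - CPU-bound workload for the "d" group.
 *
 * Hypothetical setup (paths depend on where the cpu cgroup is mounted):
 *   mkdir -p /cgroup/cpu/c/d
 *   echo 50000 > /cgroup/cpu/c/cpu.rt_runtime_us
 *   echo 50000 > /cgroup/cpu/c/d/cpu.rt_runtime_us
 *   echo $$ > /cgroup/cpu/c/d/tasks
 *   chrt -f 50 ./busyloop
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	unsigned long iters = 0;
	time_t last = time(NULL);

	for (;;) {			/* the "while 1 loop" from the test */
		time_t now = time(NULL);

		iters++;
		if (now != last) {	/* print once a second to show forward progress */
			printf("alive, %lu iterations\n", iters);
			fflush(stdout);
			last = now;
			iters = 0;
		}
	}
	return 0;
}

Per the commit message, on an unpatched kernel the task simply stops running
once both groups are throttled, so the once-a-second output would stall; with
the patch applied it should keep appearing, with the task throttled to its
configured runtime.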