Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 227
 1 file changed, 176 insertions(+), 51 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 0a6d2e5..c2730a5 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -62,7 +62,12 @@ static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
if (!rt_rq->tg)
return RUNTIME_INF;
- return rt_rq->tg->rt_runtime;
+ return rt_rq->rt_runtime;
+}
+
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+ return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}
#define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -127,14 +132,39 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
return p->prio != p->normal_prio;
}
+#ifdef CONFIG_SMP
+static inline cpumask_t sched_rt_period_mask(void)
+{
+ return cpu_rq(smp_processor_id())->rd->span;
+}
+#else
+static inline cpumask_t sched_rt_period_mask(void)
+{
+ return cpu_online_map;
+}
+#endif
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+ return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+ return &rt_rq->tg->rt_bandwidth;
+}
+
#else
static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
- if (sysctl_sched_rt_runtime == -1)
- return RUNTIME_INF;
+ return rt_rq->rt_runtime;
+}
- return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
+static inline u64 sched_rt_period(struct rt_rq *rt_rq)
+{
+ return ktime_to_ns(def_rt_bandwidth.rt_period);
}
#define for_each_leaf_rt_rq(rt_rq, rq) \
@@ -173,6 +203,102 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
return rt_rq->rt_throttled;
}
+
+static inline cpumask_t sched_rt_period_mask(void)
+{
+ return cpu_online_map;
+}
+
+static inline
+struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
+{
+ return &cpu_rq(cpu)->rt;
+}
+
+static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
+{
+ return &def_rt_bandwidth;
+}
+
+#endif
+
+static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
+{
+ int i, idle = 1;
+ cpumask_t span;
+
+ if (rt_b->rt_runtime == RUNTIME_INF)
+ return 1;
+
+ span = sched_rt_period_mask();
+ for_each_cpu_mask(i, span) {
+ int enqueue = 0;
+ struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
+ struct rq *rq = rq_of_rt_rq(rt_rq);
+
+ spin_lock(&rq->lock);
+ if (rt_rq->rt_time) {
+ u64 runtime;
+
+ spin_lock(&rt_rq->rt_runtime_lock);
+ runtime = rt_rq->rt_runtime;
+ rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
+ if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
+ rt_rq->rt_throttled = 0;
+ enqueue = 1;
+ }
+ if (rt_rq->rt_time || rt_rq->rt_nr_running)
+ idle = 0;
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ }
+
+ if (enqueue)
+ sched_rt_rq_enqueue(rt_rq);
+ spin_unlock(&rq->lock);
+ }
+
+ return idle;
+}
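[Note on the function above: do_sched_rt_period_timer() is the new per-period replenish path. Once per rt_period it walks every rt_rq in the period mask, pays back up to overrun*runtime of accumulated rt_time under rt_runtime_lock, and re-enqueues queues whose throttle has cleared. Below is a minimal userspace sketch of that replenish decision only; the names (toy_rt_rq, replenish) are invented for illustration and are not kernel API.]

/* Toy userspace model of the per-period replenish step; not kernel code. */
#include <stdio.h>

struct toy_rt_rq {
	unsigned long long rt_time;     /* runtime consumed this period (ns) */
	unsigned long long rt_runtime;  /* runtime budget per period (ns)    */
	int rt_throttled;
	int rt_nr_running;
};

/* Returns 1 ("idle") when nothing is left to account, mirroring the
 * value the period timer uses to decide whether to rearm. */
static int replenish(struct toy_rt_rq *rt_rq, int overrun)
{
	int idle = 1;

	if (rt_rq->rt_time) {
		unsigned long long decay =
			(unsigned long long)overrun * rt_rq->rt_runtime;

		if (decay > rt_rq->rt_time)
			decay = rt_rq->rt_time;
		rt_rq->rt_time -= decay;

		/* Unthrottle once the queue is back under its budget. */
		if (rt_rq->rt_throttled && rt_rq->rt_time < rt_rq->rt_runtime)
			rt_rq->rt_throttled = 0;

		if (rt_rq->rt_time || rt_rq->rt_nr_running)
			idle = 0;
	}
	return idle;
}

int main(void)
{
	struct toy_rt_rq rq = { .rt_time = 1200, .rt_runtime = 950, .rt_throttled = 1 };

	/* prints: idle=0 throttled=0 rt_time=250 */
	printf("idle=%d throttled=%d rt_time=%llu\n",
	       replenish(&rq, 1), rq.rt_throttled, rq.rt_time);
	return 0;
}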
+
+#ifdef CONFIG_SMP
+static int balance_runtime(struct rt_rq *rt_rq)
+{
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+ struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+ int i, weight, more = 0;
+ u64 rt_period;
+
+ weight = cpus_weight(rd->span);
+
+ spin_lock(&rt_b->rt_runtime_lock);
+ rt_period = ktime_to_ns(rt_b->rt_period);
+ for_each_cpu_mask(i, rd->span) {
+ struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
+ s64 diff;
+
+ if (iter == rt_rq)
+ continue;
+
+ spin_lock(&iter->rt_runtime_lock);
+ diff = iter->rt_runtime - iter->rt_time;
+ if (diff > 0) {
+ do_div(diff, weight);
+ if (rt_rq->rt_runtime + diff > rt_period)
+ diff = rt_period - rt_rq->rt_runtime;
+ iter->rt_runtime -= diff;
+ rt_rq->rt_runtime += diff;
+ more = 1;
+ if (rt_rq->rt_runtime == rt_period) {
+ spin_unlock(&iter->rt_runtime_lock);
+ break;
+ }
+ }
+ spin_unlock(&iter->rt_runtime_lock);
+ }
+ spin_unlock(&rt_b->rt_runtime_lock);
+
+ return more;
+}
#endif
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
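[Note on balance_runtime() above: it implements SMP runtime borrowing. A CPU that has exhausted its rt_runtime pulls spare budget (rt_runtime - rt_time) from each other CPU in the root-domain span, taking only a 1/weight share from each lender and never growing past one full rt_period. Below is a rough userspace model of that redistribution rule, with plain arrays standing in for the per-CPU rt_rqs; all names are invented and no locking is modelled.]

/* Userspace sketch of the borrowing rule; not kernel code. */
#include <stdio.h>

#define NR_CPUS 4

static long long runtime[NR_CPUS] = { 250, 950, 950, 950 }; /* budgets  */
static long long rt_time[NR_CPUS] = { 250, 100, 300,   0 }; /* consumed */

static int borrow(int cpu, long long period)
{
	int i, more = 0;

	for (i = 0; i < NR_CPUS; i++) {
		long long diff;

		if (i == cpu)
			continue;

		diff = runtime[i] - rt_time[i];        /* lender's spare budget   */
		if (diff > 0) {
			diff /= NR_CPUS;               /* lend only a fair share  */
			if (runtime[cpu] + diff > period)
				diff = period - runtime[cpu];
			runtime[i] -= diff;
			runtime[cpu] += diff;
			more = 1;
			if (runtime[cpu] == period)    /* can't hold more than a period */
				break;
		}
	}
	return more;
}

int main(void)
{
	borrow(0, 1000);
	/* 250 + 212 + 162 + 237 = 861 */
	printf("cpu0 runtime is now %lld\n", runtime[0]);
	return 0;
}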
@@ -197,12 +323,24 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
if (rt_rq->rt_throttled)
return rt_rq_throttled(rt_rq);
+ if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
+ return 0;
+
+#ifdef CONFIG_SMP
if (rt_rq->rt_time > runtime) {
- struct rq *rq = rq_of_rt_rq(rt_rq);
+ int more;
- rq->rt_throttled = 1;
- rt_rq->rt_throttled = 1;
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ more = balance_runtime(rt_rq);
+ spin_lock(&rt_rq->rt_runtime_lock);
+ if (more)
+ runtime = sched_rt_runtime(rt_rq);
+ }
+#endif
+
+ if (rt_rq->rt_time > runtime) {
+ rt_rq->rt_throttled = 1;
if (rt_rq_throttled(rt_rq)) {
sched_rt_rq_dequeue(rt_rq);
return 1;
@@ -212,29 +350,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
return 0;
}
-static void update_sched_rt_period(struct rq *rq)
-{
- struct rt_rq *rt_rq;
- u64 period;
-
- while (rq->clock > rq->rt_period_expire) {
- period = (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
- rq->rt_period_expire += period;
-
- for_each_leaf_rt_rq(rt_rq, rq) {
- u64 runtime = sched_rt_runtime(rt_rq);
-
- rt_rq->rt_time -= min(rt_rq->rt_time, runtime);
- if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
- rt_rq->rt_throttled = 0;
- sched_rt_rq_enqueue(rt_rq);
- }
- }
-
- rq->rt_throttled = 0;
- }
-}
-
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -259,9 +374,15 @@ static void update_curr_rt(struct rq *rq)
curr->se.exec_start = rq->clock;
cpuacct_charge(curr, delta_exec);
- rt_rq->rt_time += delta_exec;
- if (sched_rt_runtime_exceeded(rt_rq))
- resched_task(curr);
+ for_each_sched_rt_entity(rt_se) {
+ rt_rq = rt_rq_of_se(rt_se);
+
+ spin_lock(&rt_rq->rt_runtime_lock);
+ rt_rq->rt_time += delta_exec;
+ if (sched_rt_runtime_exceeded(rt_rq))
+ resched_task(curr);
+ spin_unlock(&rt_rq->rt_runtime_lock);
+ }
}
static inline
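[Note on the update_curr_rt() hunk above: the consumed runtime is now charged at every level of the group hierarchy via for_each_sched_rt_entity(), each level under its own rt_runtime_lock, so throttling can trip at whichever level runs out of budget first. Below is a toy model of that upward walk; the struct and helper names are invented for illustration.]

/* Sketch of charging runtime at every level of a group hierarchy. */
#include <stdio.h>

struct toy_rt_rq {
	struct toy_rt_rq *parent;      /* NULL at the top level */
	unsigned long long rt_time;    /* consumed this period  */
	unsigned long long rt_runtime; /* per-period budget     */
	int rt_throttled;
};

/* Returns 1 if any level went over budget, i.e. the task should be rescheduled. */
static int charge(struct toy_rt_rq *rt_rq, unsigned long long delta_exec)
{
	int resched = 0;

	for (; rt_rq; rt_rq = rt_rq->parent) {
		rt_rq->rt_time += delta_exec;
		if (rt_rq->rt_time > rt_rq->rt_runtime) {
			rt_rq->rt_throttled = 1;
			resched = 1;
		}
	}
	return resched;
}

int main(void)
{
	struct toy_rt_rq root  = { .rt_runtime = 950 };
	struct toy_rt_rq group = { .parent = &root, .rt_runtime = 400 };

	/* prints: resched=1 group throttled=1 */
	printf("resched=%d group throttled=%d\n",
	       charge(&group, 450), group.rt_throttled);
	return 0;
}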
@@ -284,6 +405,11 @@ void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
#ifdef CONFIG_RT_GROUP_SCHED
if (rt_se_boosted(rt_se))
rt_rq->rt_nr_boosted++;
+
+ if (rt_rq->tg)
+ start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
+#else
+ start_rt_bandwidth(&def_rt_bandwidth);
#endif
}
@@ -353,27 +479,21 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
/*
* Because the prio of an upper entry depends on the lower
* entries, we must remove entries top - down.
- *
- * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
- * doesn't matter much for now, as h=2 for GROUP_SCHED.
*/
static void dequeue_rt_stack(struct task_struct *p)
{
- struct sched_rt_entity *rt_se, *top_se;
+ struct sched_rt_entity *rt_se, *back = NULL;
- /*
- * dequeue all, top - down.
- */
- do {
- rt_se = &p->rt;
- top_se = NULL;
- for_each_sched_rt_entity(rt_se) {
- if (on_rt_rq(rt_se))
- top_se = rt_se;
- }
- if (top_se)
- dequeue_rt_entity(top_se);
- } while (top_se);
+ rt_se = &p->rt;
+ for_each_sched_rt_entity(rt_se) {
+ rt_se->back = back;
+ back = rt_se;
+ }
+
+ for (rt_se = back; rt_se; rt_se = rt_se->back) {
+ if (on_rt_rq(rt_se))
+ dequeue_rt_entity(rt_se);
+ }
}
/*
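[Note on the rewritten dequeue_rt_stack() above: it replaces the old repeated top-down scan (the O(h^2) behaviour the removed XXX comment describes) with a single upward pass that threads ->back pointers, followed by one downward pass that dequeues from the topmost on-rq entity down. Below is a standalone sketch of that two-pass walk; every name in it is invented for illustration.]

/* Userspace sketch of the back-pointer dequeue walk; not kernel code. */
#include <stdio.h>

struct toy_entity {
	const char *name;
	struct toy_entity *parent;  /* walk up the group hierarchy  */
	struct toy_entity *back;    /* filled in on the upward pass */
	int on_rq;
};

static void dequeue(struct toy_entity *e)
{
	e->on_rq = 0;
	printf("dequeued %s\n", e->name);
}

static void dequeue_stack(struct toy_entity *se)
{
	struct toy_entity *back = NULL;

	/* Upward pass: remember the reverse order. */
	for (; se; se = se->parent) {
		se->back = back;
		back = se;
	}

	/* Downward pass: topmost entity first, as in the patch. */
	for (se = back; se; se = se->back) {
		if (se->on_rq)
			dequeue(se);
	}
}

int main(void)
{
	struct toy_entity top  = { "top",  NULL, NULL, 1 };
	struct toy_entity mid  = { "mid",  &top, NULL, 1 };
	struct toy_entity task = { "task", &mid, NULL, 1 };

	dequeue_stack(&task);   /* prints: top, mid, task */
	return 0;
}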
@@ -393,6 +513,8 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
*/
for_each_sched_rt_entity(rt_se)
enqueue_rt_entity(rt_se);
+
+ inc_cpu_load(rq, p->se.load.weight);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -412,6 +534,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
if (rt_rq && rt_rq->rt_nr_running)
enqueue_rt_entity(rt_se);
}
+
+ dec_cpu_load(rq, p->se.load.weight);
}
/*
@@ -1001,7 +1125,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
return 0;
}
-static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+static void set_cpus_allowed_rt(struct task_struct *p,
+ const cpumask_t *new_mask)
{
int weight = cpus_weight(*new_mask);