Diffstat (limited to 'kernel')
 kernel/sched/core.c  | 12
 kernel/sched/fair.c  |  2
 kernel/sched/rt.c    |  9
 kernel/sched/sched.h | 15
 4 files changed, 28 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 46a2345..b53cc85 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -119,7 +119,9 @@ void update_rq_clock(struct rq *rq)
 {
 	s64 delta;
 
-	if (rq->skip_clock_update > 0)
+	lockdep_assert_held(&rq->lock);
+
+	if (rq->clock_skip_update & RQCF_ACT_SKIP)
 		return;
 
 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
@@ -1046,7 +1048,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq->skip_clock_update = 1;
+		rq_clock_skip_update(rq, true);
 }
 
 #ifdef CONFIG_SMP
@@ -2779,6 +2781,8 @@ need_resched:
 	smp_mb__before_spinlock();
 	raw_spin_lock_irq(&rq->lock);
 
+	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+
 	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2803,13 +2807,13 @@ need_resched:
 		switch_count = &prev->nvcsw;
 	}
 
-	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
+	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
 	next = pick_next_task(rq, prev);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
-	rq->skip_clock_update = 0;
+	rq->clock_skip_update = 0;
 
 	if (likely(prev != next)) {
 		rq->nr_switches++;
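
Note on the core.c changes: the old tri-state skip_clock_update becomes a request/act pair. check_preempt_curr() only requests a skip, __schedule() promotes the request to an active skip with the left shift, update_rq_clock() honours only the active bit, and the whole word is cleared once the next task has been picked. The stand-alone user-space sketch below models that lifecycle; the rq layout, the clock source, the locking and the task_on_rq_queued() test are stubbed out or dropped, so this is an illustration of the state machine, not the kernel code.

/* Stand-alone model of the REQ -> ACT skip lifecycle (illustration only). */
#include <stdbool.h>
#include <stdio.h>

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

struct rq {
	unsigned int clock_skip_update;
	unsigned long long clock;
};

static unsigned long long fake_sched_clock;	/* stands in for sched_clock_cpu() */

static void update_rq_clock(struct rq *rq)
{
	if (rq->clock_skip_update & RQCF_ACT_SKIP)	/* only an *active* skip wins */
		return;
	rq->clock = fake_sched_clock;
}

static void rq_clock_skip_update(struct rq *rq, bool skip)
{
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

/* The clock-related slice of __schedule(): promote, update, clear. */
static void schedule_slice(struct rq *rq)
{
	rq->clock_skip_update <<= 1;	/* REQ (0x01) becomes ACT (0x02), 0 stays 0 */
	update_rq_clock(rq);		/* skipped iff a request was pending */
	rq->clock_skip_update = 0;	/* one schedule() consumes the request */
}

int main(void)
{
	struct rq rq = { 0, 0 };

	fake_sched_clock = 100;
	rq_clock_skip_update(&rq, true);	/* e.g. check_preempt_curr() */
	schedule_slice(&rq);
	printf("requested skip : clock=%llu\n", rq.clock);	/* still 0 */

	fake_sched_clock = 200;
	schedule_slice(&rq);			/* no request pending */
	printf("normal schedule: clock=%llu\n", rq.clock);	/* now 200 */
	return 0;
}

Because update_rq_clock() reacts only to RQCF_ACT_SKIP, a request raised outside __schedule() cannot suppress clock updates done elsewhere; it is consumed by exactly one schedule pass.
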
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 50ff902..2ecf779 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5156,7 +5156,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq->skip_clock_update = 1;
+		rq_clock_skip_update(rq, true);
 	}
 
 	set_skip_buddy(se);
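
Note on the fair.c change: yield_task_fair() has just updated the rq clock for the yielding task's accounting, so the update __schedule() would otherwise repeat moments later covers essentially no time. The rough stand-alone sketch below (using clock_gettime() as a stand-in for the rq clock; not kernel code) shows how small such a back-to-back delta is, which is why the yield path requests a skip instead of paying for a second update on the fast path.

/* How small is a back-to-back clock delta? (user-space stand-in, not kernel code) */
#include <stdio.h>
#include <time.h>

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
	unsigned long long clock = now_ns();		/* update done by the yield path */
	unsigned long long delta = now_ns() - clock;	/* what schedule() would add */

	printf("back-to-back delta: %llu ns\n", delta);
	return 0;
}
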
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ee15f5a..6725e3c 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -831,11 +831,14 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 				enqueue = 1;
 
 				/*
-				 * Force a clock update if the CPU was idle,
-				 * lest wakeup -> unthrottle time accumulate.
+				 * When we're idle and a woken (rt) task is
+				 * throttled check_preempt_curr() will set
+				 * skip_update and the time between the wakeup
+				 * and this unthrottle will get accounted as
+				 * 'runtime'.
 				 */
 				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-					rq->skip_clock_update = -1;
+					rq_clock_skip_update(rq, false);
 			}
 			if (rt_rq->rt_time || rt_rq->rt_nr_running)
 				idle = 0;
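
Note on the rt.c change: the old code forced a clock update by setting skip_clock_update to -1; the new code instead clears any pending skip request, so a skip queued by check_preempt_curr() at wakeup cannot cause the wakeup-to-unthrottle gap to be charged as runtime. A minimal stand-alone check of that clearing behaviour (flag and helper copied from the sched.h hunk below, rq reduced to the one relevant field):

#include <assert.h>
#include <stdbool.h>

#define RQCF_REQ_SKIP	0x01

struct rq {
	unsigned int clock_skip_update;
};

static void rq_clock_skip_update(struct rq *rq, bool skip)
{
	if (skip)
		rq->clock_skip_update |= RQCF_REQ_SKIP;
	else
		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
}

int main(void)
{
	/* A skip was requested at wakeup time, e.g. by check_preempt_curr(). */
	struct rq rq = { RQCF_REQ_SKIP };

	/* do_sched_rt_period_timer() unthrottles while rq->curr == rq->idle ... */
	rq_clock_skip_update(&rq, false);

	/* ... so the next __schedule() promotes 0, not REQ, and updates the clock. */
	assert(rq.clock_skip_update == 0);
	return 0;
}
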
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index bd23732..0870db2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -558,8 +558,6 @@ struct rq {
 #ifdef CONFIG_NO_HZ_FULL
 	unsigned long last_sched_tick;
 #endif
-	int skip_clock_update;
-
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -588,6 +586,7 @@ struct rq {
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
 
+	unsigned int clock_skip_update;
 	u64 clock;
 	u64 clock_task;
 
@@ -704,6 +703,18 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
+#define RQCF_REQ_SKIP	0x01
+#define RQCF_ACT_SKIP	0x02
+
+static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+{
+	lockdep_assert_held(&rq->lock);
+	if (skip)
+		rq->clock_skip_update |= RQCF_REQ_SKIP;
+	else
+		rq->clock_skip_update &= ~RQCF_REQ_SKIP;
+}
+
 #ifdef CONFIG_NUMA
 enum numa_topology_type {
 	NUMA_DIRECT,
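
One detail the sched.h hunk leaves implicit: the "promote REQ to ACT" shift in __schedule() works only because RQCF_REQ_SKIP sits exactly one bit below RQCF_ACT_SKIP, so shifting left by one maps a pending request onto the active flag and maps zero onto zero. A stand-alone compile-time check of that assumption (values copied from the hunk above):

#define RQCF_REQ_SKIP	0x01
#define RQCF_ACT_SKIP	0x02

/* 'rq->clock_skip_update <<= 1' relies on REQ sitting one bit below ACT. */
_Static_assert((RQCF_REQ_SKIP << 1) == RQCF_ACT_SKIP,
	       "REQ_SKIP must be exactly one bit below ACT_SKIP");

int main(void)
{
	return 0;
}
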