author	Guillaume Chazarain <guichaz@yahoo.fr>	2008-04-19 19:44:57 +0200
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 19:44:57 +0200
commit	15934a37324f32e0fda633dc7984a671ea81cd75 (patch)
tree	1f65ac7d910e76b65d0cfcd1c72e156b0a8bd273 /kernel/sched.c
parent	30914a58af9d21c5f1831adabb5d7a800a378675 (diff)
sched: fix rq->clock overflows detection with CONFIG_NO_HZ
When using CONFIG_NO_HZ, rq->tick_timestamp is not updated every
TICK_NSEC. We check that the number of skipped ticks matches the clock
jump seen in __update_rq_clock().

Signed-off-by: Guillaume Chazarain <guichaz@yahoo.fr>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
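For illustration only, the bound this patch enforces can be reproduced in a
standalone C program; this is a sketch, not part of the patch. It assumes
HZ=1000 (so TICK_NSEC is taken as 1,000,000 ns), and the helper name
max_jump and the sample jiffies values are invented for the example. With
NO_HZ active, the allowed forward clock jump grows with the number of
jiffies elapsed since the last observed tick, plus two ticks of headroom;
otherwise it stays at the pre-patch single-tick bound.

	#include <stdio.h>
	#include <stdint.h>

	#define TICK_NSEC 1000000ULL	/* sketch assumes HZ=1000 */

	/*
	 * Standalone mirror of the patch's
	 * max_skipped_ticks(rq) * TICK_NSEC computation: with NO_HZ on,
	 * allow (jiffies - last_tick_seen + 2) ticks of forward jump;
	 * otherwise a single tick, as before the patch.
	 */
	static uint64_t max_jump(uint64_t jiffies, uint64_t last_tick_seen,
				 int nohz_on)
	{
		uint64_t ticks = nohz_on ? jiffies - last_tick_seen + 2 : 1;
		return ticks * TICK_NSEC;
	}

	int main(void)
	{
		/* 5 ticks skipped while idle in NO_HZ mode:
		 * a jump of up to 7 ticks (7,000,000 ns) is accepted. */
		printf("nohz: %llu ns\n",
		       (unsigned long long)max_jump(105, 100, 1));
		/* Periodic mode keeps the old one-tick bound. */
		printf("periodic: %llu ns\n",
		       (unsigned long long)max_jump(105, 100, 0));
		return 0;
	}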
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	38
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 7fe334e..d8456a9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -397,6 +397,7 @@ struct rq {
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
 	unsigned char idle_at_tick;
 #ifdef CONFIG_NO_HZ
+	unsigned long last_tick_seen;
 	unsigned char in_nohz_recently;
 #endif
 	/* capture load from *all* tasks on this cpu: */
@@ -500,6 +501,32 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
+#ifdef CONFIG_NO_HZ
+static inline bool nohz_on(int cpu)
+{
+	return tick_get_tick_sched(cpu)->nohz_mode != NOHZ_MODE_INACTIVE;
+}
+
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+	return nohz_on(cpu_of(rq)) ? jiffies - rq->last_tick_seen + 2 : 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+	rq->last_tick_seen = jiffies;
+}
+#else
+static inline u64 max_skipped_ticks(struct rq *rq)
+{
+	return 1;
+}
+
+static inline void update_last_tick_seen(struct rq *rq)
+{
+}
+#endif
+
 /*
  * Update the per-runqueue clock, as finegrained as the platform can give
  * us, but without assuming monotonicity, etc.:
@@ -524,9 +551,12 @@ static void __update_rq_clock(struct rq *rq)
 		/*
 		 * Catch too large forward jumps too:
 		 */
-		if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
-			if (clock < rq->tick_timestamp + TICK_NSEC)
-				clock = rq->tick_timestamp + TICK_NSEC;
+		u64 max_jump = max_skipped_ticks(rq) * TICK_NSEC;
+		u64 max_time = rq->tick_timestamp + max_jump;
+
+		if (unlikely(clock + delta > max_time)) {
+			if (clock < max_time)
+				clock = max_time;
 			else
 				clock++;
 			rq->clock_overflows++;
@@ -3812,6 +3842,7 @@ void scheduler_tick(void)
 		rq->clock_underflows++;
 	}
 	rq->tick_timestamp = rq->clock;
+	update_last_tick_seen(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 	update_sched_rt_period(rq);
@@ -7261,6 +7292,7 @@ void __init sched_init(void)
 		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		rq->clock = 1;
+		update_last_tick_seen(rq);
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED