author	Frederic Weisbecker <frederic@kernel.org>	2018-02-21 05:17:28 +0100
committer	Ingo Molnar <mingo@kernel.org>	2018-02-21 09:49:09 +0100
commit	dcdedb24159be3487e3dbbe1faa79ae7d00c92ac (patch)
tree	166945628379cae5ed1e47eef9b5ae1333de79d5 /kernel/sched
parent	d84b31313ef8a8de55a2cbfb72f76f36d8c927fb (diff)
sched/nohz: Remove the 1 Hz tick code
Now that the 1Hz tick is offloaded to workqueues, we can safely remove
the residual code that used to handle it locally.
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Wanpeng Li <kernellwp@gmail.com>
Link: http://lkml.kernel.org/r/1519186649-3242-7-git-send-email-frederic@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
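
For context, the mechanism that makes this removal safe lives in the parent commit (d84b31313ef8): each CPU gets a tick_work item whose delayed work re-queues itself once per second, keeping scheduler bookkeeping moving on nohz_full CPUs. The following is a condensed sketch of that shape, not the verbatim kernel code; the body of the remote tick and the CPU-hotplug handling are elided:

/*
 * Condensed sketch of the 1 Hz offload (see the parent commit for the
 * real implementation): a self-re-arming delayed work stands in for
 * the periodic tick on CPUs whose tick is stopped.
 */
#include <linux/workqueue.h>
#include <linux/percpu.h>

struct tick_work {
	int			cpu;
	struct delayed_work	work;
};

/* One tick_work per possible CPU; allocation at init time is elided. */
static struct tick_work __percpu *tick_work_cpu;

static void sched_tick_remote(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tick_work *twork = container_of(dwork, struct tick_work, work);

	/* Run the bookkeeping scheduler_tick() would do for twork->cpu... */

	/* Re-arm: guarantee at least one tick per second on that CPU. */
	queue_delayed_work(system_unbound_wq, dwork, HZ);
}

With this in place, the 1-second deferment cap removed below (scheduler_tick_max_deferment()) is dead weight: the workqueue, not the tick path, now enforces the 1 Hz floor.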
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	29
-rw-r--r--	kernel/sched/idle_task.c	1
-rw-r--r--	kernel/sched/sched.h	11
3 files changed, 1 insertion(+), 40 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5dfef45..8fff4f1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3096,35 +3096,9 @@ void scheduler_tick(void)
 	rq->idle_balance = idle_cpu(cpu);
 	trigger_load_balance(rq);
 #endif
-	rq_last_tick_reset(rq);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
-/**
- * scheduler_tick_max_deferment
- *
- * Keep at least one tick per second when a single
- * active task is running because the scheduler doesn't
- * yet completely support full dynticks environment.
- *
- * This makes sure that uptime, CFS vruntime, load
- * balancing, etc... continue to move forward, even
- * with a very low granularity.
- *
- * Return: Maximum deferment in nanoseconds.
- */
-u64 scheduler_tick_max_deferment(void)
-{
-	struct rq *rq = this_rq();
-	unsigned long next, now = READ_ONCE(jiffies);
-
-	next = rq->last_sched_tick + HZ;
-
-	if (time_before_eq(next, now))
-		return 0;
-
-	return jiffies_to_nsecs(next - now);
-}
 
 struct tick_work {
 	int			cpu;
@@ -6116,9 +6090,6 @@ void __init sched_init(void)
 		rq->last_load_update_tick = jiffies;
 		rq->nohz_flags = 0;
 #endif
-#ifdef CONFIG_NO_HZ_FULL
-		rq->last_sched_tick = 0;
-#endif
 #endif /* CONFIG_SMP */
 		hrtick_rq_init(rq);
 		atomic_set(&rq->nr_iowait, 0);
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index e1b46e0..48b8a83 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -48,7 +48,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
 
 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
-	rq_last_tick_reset(rq);
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c1c7c78..dc6c8b5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -727,9 +727,7 @@ struct rq {
 #endif /* CONFIG_SMP */
 	unsigned long nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
-#ifdef CONFIG_NO_HZ_FULL
-	unsigned long last_sched_tick;
-#endif
+
 	/* capture load from *all* tasks on this cpu: */
 	struct load_weight load;
 	unsigned long nr_load_updates;
@@ -1626,13 +1624,6 @@ static inline void sub_nr_running(struct rq *rq, unsigned count)
 	sched_update_tick_dependency(rq);
 }
 
-static inline void rq_last_tick_reset(struct rq *rq)
-{
-#ifdef CONFIG_NO_HZ_FULL
-	rq->last_sched_tick = jiffies;
-#endif
-}
-
 extern void update_rq_clock(struct rq *rq);
 extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
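
As a worked example of the logic being deleted: with HZ=250 (one jiffy = 4 ms), if the last tick on this runqueue ran 100 jiffies ago, then next = rq->last_sched_tick + HZ lies 150 jiffies in the future, and scheduler_tick_max_deferment() returned jiffies_to_nsecs(150) = 600,000,000 ns; the tick could be deferred for at most another 600 ms before the 1-second cap was hit. The workqueue offload now provides that once-per-second guarantee, so the cap, the last_sched_tick field, and the rq_last_tick_reset() helper all go away.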