author | Kirill Tkhai <ktkhai@parallels.com> | 2014-09-30 12:23:37 +0400
committer | Ingo Molnar <mingo@kernel.org> | 2014-10-03 05:46:58 +0200
commit | f10e00f4bf360c36edbe6bf18a6c75b171cbe012
tree | 0733366372b97784b82acafa6d0fc0a57f62e27b /kernel
parent | 10a12983b3d437a6998b3845870e52c1c752c101
sched/dl: Use dl_bw_of() under rcu_read_lock_sched()
rq->rd is freed using call_rcu_sched(), so a plain rcu_read_lock() is not enough to
protect access to it. We should use either rcu_read_lock_sched() or preempt_disable().
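For illustration only (not part of the patch), a minimal sketch of the access pattern
this change enforces. The function example_read_dl_bw() is hypothetical; dl_bw_of()
and the dl_bw lock are the existing ones in kernel/sched/core.c:

/* Hypothetical sketch, not from this patch: dl_bw_of() dereferences
 * rq->rd, which is freed via call_rcu_sched(), so the lookup must sit
 * inside an RCU-sched (or preemption-disabled) read-side section. */
static void example_read_dl_bw(int cpu)
{
	struct dl_bw *dl_b;
	unsigned long flags;

	rcu_read_lock_sched();		/* or preempt_disable() */
	dl_b = dl_bw_of(cpu);		/* rq->rd stays valid until unlock */

	raw_spin_lock_irqsave(&dl_b->lock, flags);
	/* ... inspect or update dl_b->total_bw / dl_b->bw ... */
	raw_spin_unlock_irqrestore(&dl_b->lock, flags);

	rcu_read_unlock_sched();	/* ends the sched-RCU critical section */
}

On kernels of this era, a plain rcu_read_lock() does not delay call_rcu_sched()
callbacks, which is exactly the bug being fixed.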
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Fixes: 66339c31bc39 ("sched: Use dl_bw_of() under RCU read lock")
Link: http://lkml.kernel.org/r/1412065417.20287.24.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/core.c | 25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5349fe..c84bdc0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5264,6 +5264,7 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 {
 	unsigned long flags;
 	long cpu = (long)hcpu;
+	struct dl_bw *dl_b;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
@@ -5271,15 +5272,19 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 
 		/* explicitly allow suspend */
 		if (!(action & CPU_TASKS_FROZEN)) {
-			struct dl_bw *dl_b = dl_bw_of(cpu);
 			bool overflow;
 			int cpus;
 
+			rcu_read_lock_sched();
+			dl_b = dl_bw_of(cpu);
+
 			raw_spin_lock_irqsave(&dl_b->lock, flags);
 			cpus = dl_bw_cpus(cpu);
 			overflow = __dl_overflow(dl_b, cpus, 0, 0);
 			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+			rcu_read_unlock_sched();
+
 			if (overflow)
 				return notifier_from_errno(-EBUSY);
 		}
@@ -7647,11 +7652,10 @@ static int sched_dl_global_constraints(void)
 	u64 runtime = global_rt_runtime();
 	u64 period = global_rt_period();
 	u64 new_bw = to_ratio(period, runtime);
+	struct dl_bw *dl_b;
 	int cpu, ret = 0;
 	unsigned long flags;
 
-	rcu_read_lock();
-
 	/*
 	 * Here we want to check the bandwidth not being set to some
 	 * value smaller than the currently allocated bandwidth in
@@ -7662,25 +7666,27 @@ static int sched_dl_global_constraints(void)
 	 * solutions is welcome!
 	 */
 	for_each_possible_cpu(cpu) {
-		struct dl_bw *dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
 		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		if (new_bw < dl_b->total_bw)
 			ret = -EBUSY;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+		rcu_read_unlock_sched();
+
 		if (ret)
 			break;
 	}
 
-	rcu_read_unlock();
-
 	return ret;
 }
 
 static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
+	struct dl_bw *dl_b;
 	int cpu;
 	unsigned long flags;
 
@@ -7690,18 +7696,19 @@ static void sched_dl_do_global(void)
 	if (global_rt_runtime() != RUNTIME_INF)
 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
 
-	rcu_read_lock();
 	/*
 	 * FIXME: As above...
 	 */
 	for_each_possible_cpu(cpu) {
-		struct dl_bw *dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
 		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		dl_b->bw = new_bw;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+		rcu_read_unlock_sched();
 	}
-	rcu_read_unlock();
 }
 
 static int sched_rt_global_validate(void)