author    Tejun Heo <tj@kernel.org>  2018-05-23 11:04:54 -0700
committer Tejun Heo <tj@kernel.org>  2018-05-23 11:04:54 -0700
commit    d8742e22902186e30c346b1ba881cb52942ae3e4 (patch)
tree      a4dd3d558a5eb43dd5ce547ec10a00e9c9bc993a /kernel/cgroup
parent    cc659e76f375ab158fb682c1e39b2a22bf9f7657 (diff)
cgroup: css_set_lock should nest inside tasklist_lock
cgroup_enable_task_cg_lists() incorrectly nests non-irq-safe
tasklist_lock inside irq-safe css_set_lock, triggering the following
lockdep warning.

  WARNING: possible irq lock inversion dependency detected
  4.17.0-rc1-00027-gb37d049 #6 Not tainted
  --------------------------------------------------------
  systemd/1 just changed the state of lock:
  00000000fe57773b (css_set_lock){..-.}, at: cgroup_free+0xf2/0x12a

  but this lock took another, SOFTIRQ-unsafe lock in the past:
   (tasklist_lock){.+.+}

  and interrupts could create inverse lock ordering between them.

  other info that might help us debug this:
   Possible interrupt unsafe locking scenario:

         CPU0                    CPU1
         ----                    ----
    lock(tasklist_lock);
                                 local_irq_disable();
                                 lock(css_set_lock);
                                 lock(tasklist_lock);
    <Interrupt>
      lock(css_set_lock);

   *** DEADLOCK ***

The condition is highly unlikely to actually happen, especially given
that the path is executed only once per boot. Fix it by reversing the
nesting so that css_set_lock is acquired inside tasklist_lock.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Boqun Feng <boqun.feng@gmail.com>
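To make the inversion concrete, here is a minimal kernel-style sketch of
the two orderings lockdep is comparing. The lock names are hypothetical
stand-ins, not code from the patch: irq_safe_lock plays the role of
css_set_lock and irq_unsafe_lock plays the role of tasklist_lock.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(irq_safe_lock);	/* also taken from softirq context */
static DEFINE_RWLOCK(irq_unsafe_lock);	/* never taken from irq context */

/*
 * Bad: the irq-unsafe lock nests inside the irq-safe one.  Another CPU
 * already holding irq_unsafe_lock can be interrupted by a softirq that
 * spins on irq_safe_lock, completing the cycle shown in the warning.
 */
static void bad_nesting(void)
{
	spin_lock_irq(&irq_safe_lock);
	read_lock(&irq_unsafe_lock);
	/* ... */
	read_unlock(&irq_unsafe_lock);
	spin_unlock_irq(&irq_safe_lock);
}

/*
 * Good: the irq-safe lock nests inside the irq-unsafe one, which is
 * the ordering this patch establishes.
 */
static void good_nesting(void)
{
	read_lock(&irq_unsafe_lock);
	spin_lock_irq(&irq_safe_lock);
	/* ... */
	spin_unlock_irq(&irq_safe_lock);
	read_unlock(&irq_unsafe_lock);
}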
Diffstat (limited to 'kernel/cgroup')
-rw-r--r--  kernel/cgroup/cgroup.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 04b7e7f..63989cb 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1798,13 +1798,6 @@ static void cgroup_enable_task_cg_lists(void)
 {
 	struct task_struct *p, *g;
 
-	spin_lock_irq(&css_set_lock);
-
-	if (use_task_css_set_links)
-		goto out_unlock;
-
-	use_task_css_set_links = true;
-
 	/*
 	 * We need tasklist_lock because RCU is not safe against
 	 * while_each_thread(). Besides, a forking task that has passed
@@ -1813,6 +1806,13 @@ static void cgroup_enable_task_cg_lists(void)
 	 * tasklist if we walk through it with RCU.
 	 */
 	read_lock(&tasklist_lock);
+	spin_lock_irq(&css_set_lock);
+
+	if (use_task_css_set_links)
+		goto out_unlock;
+
+	use_task_css_set_links = true;
+
 	do_each_thread(g, p) {
 		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
 			     task_css_set(p) != &init_css_set);
@@ -1840,9 +1840,9 @@ static void cgroup_enable_task_cg_lists(void)
 		}
 		spin_unlock(&p->sighand->siglock);
 	} while_each_thread(g, p);
-	read_unlock(&tasklist_lock);
 out_unlock:
 	spin_unlock_irq(&css_set_lock);
+	read_unlock(&tasklist_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
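Distilled, the patched function now takes the irq-unsafe lock first and
releases in reverse order. A simplified sketch of the result, with the
per-task loop body elided:

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	read_lock(&tasklist_lock);	/* irq-unsafe lock taken first */
	spin_lock_irq(&css_set_lock);	/* irq-safe lock nests inside */

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	do_each_thread(g, p) {
		/* ... link each task into its css_set's task list ... */
	} while_each_thread(g, p);

out_unlock:
	spin_unlock_irq(&css_set_lock);	/* released in reverse order */
	read_unlock(&tasklist_lock);
}

Note that the early-exit path via out_unlock also drops both locks,
which is why read_unlock() moved below the label.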