| author | Tejun Heo <tj@kernel.org> | 2012-11-19 08:13:37 -0800 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2012-11-19 08:13:37 -0800 |
| commit | 42809dd4225b2f3127a4804314a1b33608620d96 (patch) | |
| tree | 7600d71baff46173b9b2c051a3530a4a4b7c9582 /kernel/cgroup.c | |
| parent | 02ae7486d05ae6df8395409a4945b2420f1e35c2 (diff) | |
cgroup: separate out cgroup_destroy_locked()
Separate out cgroup_destroy_locked() from cgroup_rmdir(). This will
later be used in the cgroup_create() failure path.
While at it, add lockdep asserts on i_mutex and cgroup_mutex, and move
@d and @parent assignments to their declarations.
This patch doesn't introduce any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
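For context, the cgroup_create() reuse is only anticipated by this patch; the follow-up change is not shown here. A rough, hypothetical sketch of how a creation error path that already holds both locks could reuse the new helper (the label and variable names are assumptions, not the actual follow-up code):

```c
	/*
	 * Hypothetical sketch only -- not part of this patch.  With the
	 * teardown factored into cgroup_destroy_locked(), a creation error
	 * path that already holds i_mutex and cgroup_mutex can undo a
	 * half-built cgroup without duplicating cgroup_rmdir()'s logic.
	 */
 err_destroy:					/* assumed label name */
	cgroup_destroy_locked(cgrp);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return err;
```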
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r-- | kernel/cgroup.c | 40 |
1 file changed, 25 insertions(+), 15 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5679cb1..4412d96 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -242,6 +242,8 @@ static DEFINE_SPINLOCK(hierarchy_id_lock);
  */
 static int need_forkexit_callback __read_mostly;
 
+static int cgroup_destroy_locked(struct cgroup *cgrp);
+
 #ifdef CONFIG_PROVE_LOCKING
 int cgroup_lock_is_held(void)
 {
@@ -4209,22 +4211,20 @@ static int cgroup_has_css_refs(struct cgroup *cgrp)
 	return 0;
 }
 
-static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
+static int cgroup_destroy_locked(struct cgroup *cgrp)
+	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
 {
-	struct cgroup *cgrp = dentry->d_fsdata;
-	struct dentry *d;
-	struct cgroup *parent;
+	struct dentry *d = cgrp->dentry;
+	struct cgroup *parent = cgrp->parent;
 	DEFINE_WAIT(wait);
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
 
-	/* the vfs holds both inode->i_mutex already */
-	mutex_lock(&cgroup_mutex);
-	parent = cgrp->parent;
-	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
-		mutex_unlock(&cgroup_mutex);
+	lockdep_assert_held(&d->d_inode->i_mutex);
+	lockdep_assert_held(&cgroup_mutex);
+
+	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children))
 		return -EBUSY;
-	}
 
 	/*
 	 * Block new css_tryget() by deactivating refcnt and mark @cgrp
@@ -4243,7 +4243,9 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	/*
 	 * Tell subsystems to initate destruction. pre_destroy() should be
 	 * called with cgroup_mutex unlocked. See 3fa59dfbc3 ("cgroup: fix
-	 * potential deadlock in pre_destroy") for details.
+	 * potential deadlock in pre_destroy") for details. This temporary
+	 * unlocking should go away once cgroup_mutex is unexported from
+	 * controllers.
 	 */
 	mutex_unlock(&cgroup_mutex);
 	for_each_subsys(cgrp->root, ss)
@@ -4268,11 +4270,9 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 
 	/* delete this cgroup from parent->children */
 	list_del_rcu(&cgrp->sibling);
-
 	list_del_init(&cgrp->allcg_node);
 
-	d = dget(cgrp->dentry);
-
+	dget(d);
 	cgroup_d_remove_dir(d);
 	dput(d);
 
@@ -4293,10 +4293,20 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	}
 	spin_unlock(&cgrp->event_list_lock);
 
-	mutex_unlock(&cgroup_mutex);
 	return 0;
 }
 
+static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
+{
+	int ret;
+
+	mutex_lock(&cgroup_mutex);
+	ret = cgroup_destroy_locked(dentry->d_fsdata);
+	mutex_unlock(&cgroup_mutex);
+
+	return ret;
+}
+
 static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
 {
 	INIT_LIST_HEAD(&ss->cftsets);
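Outside the kernel, the same "locked helper plus thin wrapper" shape the diff introduces is easy to demonstrate in plain C. The sketch below is a minimal userspace analog, assuming a pthread mutex plus a hand-rolled held flag stands in for cgroup_mutex and lockdep_assert_held(); all names (registry_destroy_locked() and friends) are hypothetical.

```c
/* Minimal userspace analog of the locked-helper/wrapper split above. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t registry_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool registry_mutex_held;	/* crude stand-in for lockdep state */
static int registry_count = 1;		/* pretend refcount, starts "busy" */

/* Poor man's lockdep_assert_held(): only meaningful for this one mutex. */
static void assert_registry_mutex_held(void)
{
	assert(registry_mutex_held);
}

/* Does the real work; the caller must already hold registry_mutex. */
static int registry_destroy_locked(void)
{
	assert_registry_mutex_held();

	if (registry_count > 0)
		return -1;		/* still busy, like -EBUSY above */

	printf("registry destroyed\n");
	return 0;
}

/* Public entry point: only takes and drops the lock, like cgroup_rmdir(). */
static int registry_destroy(void)
{
	int ret;

	pthread_mutex_lock(&registry_mutex);
	registry_mutex_held = true;

	ret = registry_destroy_locked();

	registry_mutex_held = false;
	pthread_mutex_unlock(&registry_mutex);
	return ret;
}

int main(void)
{
	printf("first attempt: %d\n", registry_destroy());	/* busy */
	registry_count = 0;
	printf("second attempt: %d\n", registry_destroy());	/* succeeds */
	return 0;
}
```

The payoff is the one the commit message describes: a failure path that already holds the lock can call registry_destroy_locked() directly, while outside callers such as the rmdir analog go through the wrapper that only handles locking.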