author		Tejun Heo <tj@kernel.org>	2013-05-24 10:55:38 +0900
committer	Tejun Heo <tj@kernel.org>	2013-05-24 10:55:38 +0900
commit		53fa5261747a90746531e8a1c81eeb78fedc2f71 (patch)
tree		b615f4ac453b9f40d412b5ba22498b827902f438 /kernel
parent		bdc7119f1bdd0632d42f435941dc290216a436e7 (diff)
cgroup: add cgroup->serial_nr and implement cgroup_next_sibling()
Currently, there's no easy way to find out the next sibling cgroup unless it's known that the current cgroup is accessed from the parent's children list in a single RCU critical section. This in turn forces all iterators to require the whole iteration to be enclosed in a single RCU critical section, which sometimes is too restrictive. This patch implements cgroup_next_sibling(), which can reliably determine the next sibling regardless of the state of the current cgroup as long as it's accessible.

It currently is impossible to determine the next sibling after dropping the RCU read lock because the cgroup being iterated could be removed at any time, and once the RCU read lock is dropped, nothing guarantees that its ->sibling.next pointer is accessible. A removed cgroup continues to point to its next sibling for RCU accesses but stops receiving updates from that sibling. IOW, the next sibling could be removed and then complete its grace period while the RCU read lock is dropped, making it unsafe to dereference ->sibling.next after dropping and re-acquiring the RCU read lock.

This can be solved by adding a way to traverse to the next sibling without dereferencing ->sibling.next. This patch adds a monotonically increasing cgroup serial number, cgroup->serial_nr, which guarantees that all cgroup->children lists are kept in increasing serial_nr order. A new function, cgroup_next_sibling(), is implemented: if CGRP_REMOVED is not set on the current cgroup, it follows ->sibling.next; otherwise, it traverses the parent's ->children list until it sees a sibling with a higher ->serial_nr. This allows the function to always return the next sibling regardless of the state of the current cgroup, without adding overhead to the fast path.

Further patches will update the iterators to use cgroup_next_sibling() so that they allow dropping the RCU read lock and blocking while iteration is in progress, which in turn will be used to simplify controllers.

v2: Typo fix as per Serge.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Serge E. Hallyn <serge.hallyn@ubuntu.com>
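To make the intended use concrete, here is a minimal hypothetical caller sketch (not part of this patch; walk_children_blocking() is an invented name). It assumes the caller keeps @pos accessible across the unlocked section, e.g. via a css reference, which is the one requirement cgroup_next_sibling() places on it:

static void walk_children_blocking(struct cgroup *parent)
{
	struct cgroup *pos;

	rcu_read_lock();
	pos = list_first_or_null_rcu(&parent->children, struct cgroup,
				     sibling);
	while (pos) {
		/* ... examine @pos under RCU ... */
		rcu_read_unlock();
		/* may block/sleep here; @pos stays pinned by the caller */
		rcu_read_lock();
		pos = cgroup_next_sibling(pos);	/* valid even if @pos was removed */
	}
	rcu_read_unlock();
}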
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	62
1 file changed, 62 insertions(+), 0 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5019748..b87c7a5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2976,6 +2976,55 @@ static void cgroup_enable_task_cg_lists(void)
}
/**
+ * cgroup_next_sibling - find the next sibling of a given cgroup
+ * @pos: the current cgroup
+ *
+ * This function returns the next sibling of @pos and should be called
+ * under RCU read lock. The only requirement is that @pos is accessible.
+ * The next sibling is guaranteed to be returned regardless of @pos's
+ * state.
+ */
+struct cgroup *cgroup_next_sibling(struct cgroup *pos)
+{
+ struct cgroup *next;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ /*
+ * @pos could already have been removed. Once a cgroup is removed,
+ * its ->sibling.next is no longer updated when its next sibling
+ * changes. As CGRP_REMOVED is set on removal which is fully
+ * serialized, if we see it unasserted, it's guaranteed that the
+ * next sibling hasn't finished its grace period even if it's
+ * already removed, and thus safe to dereference from this RCU
+ * critical section. If ->sibling.next is inaccessible,
+ * cgroup_is_removed() is guaranteed to be visible as %true here.
+ */
+ if (likely(!cgroup_is_removed(pos))) {
+ next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
+ if (&next->sibling != &pos->parent->children)
+ return next;
+ return NULL;
+ }
+
+ /*
+ * Can't dereference the next pointer. Each cgroup is given a
+ * monotonically increasing unique serial number and always
+ * appended to the sibling list, so the next one can be found by
+ * walking the parent's children until we see a cgroup with higher
+ * serial number than @pos's.
+ *
+ * While this path can be slow, it's taken only when either the
+ * current cgroup is removed or iteration and removal race.
+ */
+ list_for_each_entry_rcu(next, &pos->parent->children, sibling)
+ if (next->serial_nr > pos->serial_nr)
+ return next;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cgroup_next_sibling);
+
+/**
* cgroup_next_descendant_pre - find the next descendant for pre-order walk
* @pos: the current position (%NULL to initiate traversal)
* @cgroup: cgroup whose descendants to walk
@@ -4137,6 +4186,7 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
umode_t mode)
{
+ static atomic64_t serial_nr_cursor = ATOMIC64_INIT(0);
struct cgroup *cgrp;
struct cgroup_name *name;
struct cgroupfs_root *root = parent->root;
@@ -4217,6 +4267,14 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
goto err_free_all;
lockdep_assert_held(&dentry->d_inode->i_mutex);
+ /*
+ * Assign a monotonically increasing serial number. With the list
+ * appending below, it guarantees that sibling cgroups are always
+ * sorted in the ascending serial number order on the parent's
+ * ->children.
+ */
+ cgrp->serial_nr = atomic64_inc_return(&serial_nr_cursor);
+
/* allocation complete, commit to creation */
list_add_tail(&cgrp->allcg_node, &root->allcg_list);
list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
@@ -4304,6 +4362,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
* removed. This makes future css_tryget() and child creation
* attempts fail thus maintaining the removal conditions verified
* above.
+ *
+ * Note that CGRP_REMOVED setting is depended upon by
+ * cgroup_next_sibling() to resume iteration after dropping RCU
+ * read lock. See cgroup_next_sibling() for details.
*/
for_each_subsys(cgrp->root, ss) {
struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
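The slow path's correctness rests on the invariant established in cgroup_create() above: serial numbers come from a single monotonically increasing cursor and new cgroups are always appended, so every ->children list stays sorted by ->serial_nr, and resuming reduces to "first entry with a higher serial number". A self-contained toy sketch of just that property (hypothetical names, plain userspace C, not kernel code):

#include <stdio.h>

/* Toy model: children kept in creation order with monotonically
 * increasing serial numbers, mirroring cgroup->children. */
struct node { long serial_nr; };

/* Find the first entry with a serial number higher than @after,
 * mirroring the list_for_each_entry_rcu() slow path. */
static const struct node *next_after(const struct node *children, int n,
				     long after)
{
	for (int i = 0; i < n; i++)
		if (children[i].serial_nr > after)
			return &children[i];
	return NULL;	/* @after was the last child */
}

int main(void)
{
	/* the child with serial 2 was removed while the lock was dropped */
	struct node kids[] = { {1}, {3}, {5} };
	const struct node *n = next_after(kids, 3, 2);

	printf("resume at serial %ld\n", n ? n->serial_nr : -1L);	/* prints 3 */
	return 0;
}

Even though the sibling being stood on (serial 2) is gone, the scan lands on the next surviving sibling (serial 3), which is exactly the guarantee cgroup_next_sibling() provides.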