author     Tejun Heo <tj@kernel.org>                           2017-02-22 15:41:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>      2017-02-22 16:41:27 -0800
commit     9eeadc8b6e0e31f9aea1f8886ef472f62c2b7f55 (patch)
tree       c35c43f53d70a3b6bf50f11c13fc0111069ee141 /mm
parent     657dc2f9722092e951de95a8109428994541440b (diff)
slab: reorganize memcg_cache_params
We're going to change how memcg caches are iterated.  In preparation, clean up and reorganize memcg_cache_params.

* The shared ->list is replaced by ->children in root caches and ->children_node in children.

* ->is_root_cache is removed.  Instead, ->root_cache is moved out of the child union and is now used by both root and children: NULL indicates a root cache, non-NULL a memcg one.

This patch doesn't cause any observable behavior changes.

Link: http://lkml.kernel.org/r/20170117235411.9408-5-tj@kernel.org
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
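The structure being reorganized lives in include/linux/slab.h, which is outside the mm/-only diffstat shown below.  The following is only a sketch of the layout implied by the commit message, not the actual header diff; the exact union arrangement and the __rcu annotation are assumptions.

    /*
     * Sketch of memcg_cache_params after this patch, reconstructed from
     * the commit message above (illustrative; the real definition is in
     * include/linux/slab.h and is not part of the diff shown below).
     */
    struct memcg_cache_params {
            /* Shared by root and child caches; NULL means root cache. */
            struct kmem_cache *root_cache;
            union {
                    struct {        /* root cache only */
                            struct memcg_cache_array __rcu *memcg_caches;
                            struct list_head children;      /* list of per-memcg children */
                    };
                    struct {        /* per-memcg child cache only */
                            struct mem_cgroup *memcg;
                            struct list_head children_node; /* link in root's ->children */
                    };
            };
    };

With is_root_cache() reduced to a NULL check on ->root_cache, and the shared list split into ->children / ->children_node, the helper changes in the diff below follow directly.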
Diffstat (limited to 'mm')
-rw-r--r--    mm/slab.h             6
-rw-r--r--    mm/slab_common.c     25
2 files changed, 16 insertions, 15 deletions
diff --git a/mm/slab.h b/mm/slab.h
index d07563f..3ed3336 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -206,12 +206,12 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
* slab_mutex.
*/
#define for_each_memcg_cache(iter, root) \
- list_for_each_entry(iter, &(root)->memcg_params.list, \
- memcg_params.list)
+ list_for_each_entry(iter, &(root)->memcg_params.children, \
+ memcg_params.children_node)
static inline bool is_root_cache(struct kmem_cache *s)
{
- return s->memcg_params.is_root_cache;
+ return !s->memcg_params.root_cache;
}
static inline bool slab_equal_or_root(struct kmem_cache *s,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index ac469c8..c388503 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -140,9 +140,9 @@ int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
void slab_init_memcg_params(struct kmem_cache *s)
{
- s->memcg_params.is_root_cache = true;
- INIT_LIST_HEAD(&s->memcg_params.list);
+ s->memcg_params.root_cache = NULL;
RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
+ INIT_LIST_HEAD(&s->memcg_params.children);
}
static int init_memcg_params(struct kmem_cache *s,
@@ -150,10 +150,10 @@ static int init_memcg_params(struct kmem_cache *s,
{
struct memcg_cache_array *arr;
- if (memcg) {
- s->memcg_params.is_root_cache = false;
- s->memcg_params.memcg = memcg;
+ if (root_cache) {
s->memcg_params.root_cache = root_cache;
+ s->memcg_params.memcg = memcg;
+ INIT_LIST_HEAD(&s->memcg_params.children_node);
return 0;
}
@@ -223,7 +223,7 @@ int memcg_update_all_caches(int num_memcgs)
static void unlink_memcg_cache(struct kmem_cache *s)
{
- list_del(&s->memcg_params.list);
+ list_del(&s->memcg_params.children_node);
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
@@ -594,7 +594,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
goto out_unlock;
}
- list_add(&s->memcg_params.list, &root_cache->memcg_params.list);
+ list_add(&s->memcg_params.children_node,
+ &root_cache->memcg_params.children);
/*
* Since readers won't lock (see cache_from_memcg_idx()), we need a
@@ -690,7 +691,7 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
* list so as not to try to destroy it for a second
* time while iterating over inactive caches below.
*/
- list_move(&c->memcg_params.list, &busy);
+ list_move(&c->memcg_params.children_node, &busy);
else
/*
* The cache is empty and will be destroyed soon. Clear
@@ -705,17 +706,17 @@ static int shutdown_memcg_caches(struct kmem_cache *s)
* Second, shutdown all caches left from memory cgroups that are now
* offline.
*/
- list_for_each_entry_safe(c, c2, &s->memcg_params.list,
- memcg_params.list)
+ list_for_each_entry_safe(c, c2, &s->memcg_params.children,
+ memcg_params.children_node)
shutdown_cache(c);
- list_splice(&busy, &s->memcg_params.list);
+ list_splice(&busy, &s->memcg_params.children);
/*
* A cache being destroyed must be empty. In particular, this means
* that all per memcg caches attached to it must be empty too.
*/
- if (!list_empty(&s->memcg_params.list))
+ if (!list_empty(&s->memcg_params.children))
return -EBUSY;
return 0;
}
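For completeness, an illustrative caller of the renamed iterator.  count_children() is a hypothetical helper, not part of this patch, assumed to live under mm/ so that slab.h's for_each_memcg_cache() and slab_mutex are visible; it only demonstrates the documented requirement of holding slab_mutex while walking a root cache's children.

    /*
     * Hypothetical example (not from this patch): walk a root cache's
     * per-memcg children via the renamed iterator.  slab_mutex must be
     * held, as the comment above for_each_memcg_cache() requires.
     */
    static void count_children(struct kmem_cache *root)
    {
            struct kmem_cache *c;
            int n = 0;

            mutex_lock(&slab_mutex);
            for_each_memcg_cache(c, root)
                    n++;
            mutex_unlock(&slab_mutex);

            pr_info("%s has %d memcg child caches\n", root->name, n);
    }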