author    Vladimir Davydov <vdavydov@parallels.com>   2014-10-09 15:28:43 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>   2014-10-09 22:25:59 -0400
commit    33a690c45b202e4c6483bfd1d93ad8d0f51df2ca (patch)
tree      7dd3e77ff0007b5c03dd9622f5d0b5e63bc357be
parent    7a82ca0d6437261d0727ce472ae4f3a05a9ce5f7 (diff)
memcg: move memcg_{alloc,free}_cache_params to slab_common.c
The only reason why they live in memcontrol.c is that we get/put a css reference to the owner memory cgroup in them. However, we can do that in memcg_{un,}register_cache instead. OTOH, there are several reasons to move them to slab_common.c.

First, I think that the fewer public interface functions we have in memcontrol.h, the better. Since the functions I move don't depend on memcontrol, it's worth making them private to slab, especially taking into account that the arrays are defined on the slab's side too.

Second, the way per-memcg arrays are updated looks rather awkward: it proceeds from memcontrol.c (__memcg_activate_kmem) to slab_common.c (memcg_update_all_caches) and back to memcontrol.c again (memcg_update_array_size). In the following patches I move the function relocating the arrays (memcg_update_array_size) to slab_common.c and thereby get rid of this circular call path. The cache array allocation code should live in the same place as the relocation code, because the code is easier to follow that way. So I move the array alloc/free functions to slab_common.c too.

The third point isn't obvious. I'm going to make the list_lru structure per-memcg to allow targeted kmem reclaim. That means we will have per-memcg arrays in list_lrus too. It turns out to be much easier to update these arrays in list_lru.c rather than in memcontrol.c, because everything we need is defined there. This patch makes the memcg cache arrays allocation path conform to that of the upcoming list_lru.

So let's move these functions to slab_common.c and make them static.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: Glauber Costa <glommer@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
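For illustration, here is a minimal standalone sketch (plain C, not kernel code) of the sizing logic that memcg_alloc_cache_params applies in the hunks below: a root cache embeds a flexible array with one pointer slot per possible memcg, while a per-memcg (child) cache only needs the fixed-size header. The struct and the limited_groups_array_size variable are simplified stand-ins for the kernel's memcg_cache_params and memcg_limited_groups_array_size.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-in for struct memcg_cache_params. */
    struct cache_params {
            bool is_root_cache;
            void *memcg;            /* owner memcg (child caches only) */
            void *root_cache;       /* backpointer to root (child caches only) */
            void *memcg_caches[];   /* per-memcg cache array (root caches only) */
    };

    /* Stand-in for memcg_limited_groups_array_size. */
    static size_t limited_groups_array_size = 4;

    static struct cache_params *alloc_cache_params(bool is_root)
    {
            size_t size;

            if (is_root) {
                    /* Header plus one pointer slot per possible memcg. */
                    size = offsetof(struct cache_params, memcg_caches)
                           + limited_groups_array_size * sizeof(void *);
            } else {
                    /* Child caches need only the fixed-size header. */
                    size = sizeof(struct cache_params);
            }

            struct cache_params *p = calloc(1, size);  /* zeroed, like kzalloc */
            if (p)
                    p->is_root_cache = is_root;
            return p;
    }

    int main(void)
    {
            struct cache_params *root = alloc_cache_params(true);
            struct cache_params *child = alloc_cache_params(false);

            printf("root cache array has room for %zu memcg slots\n",
                   limited_groups_array_size);

            free(root);
            free(child);
            return 0;
    }

Note that in the patch itself the css get/put on the owner memcg no longer happens in these helpers; it moves into memcg_register_cache/memcg_unregister_cache, which is what lets the alloc/free functions become static in slab_common.c.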
-rw-r--r--   include/linux/memcontrol.h   14
-rw-r--r--   mm/memcontrol.c              41
-rw-r--r--   mm/slab_common.c             44
3 files changed, 47 insertions(+), 52 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e0752d2..4d17242 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -440,10 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
int memcg_cache_id(struct mem_cgroup *memcg);
-int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
- struct kmem_cache *root_cache);
-void memcg_free_cache_params(struct kmem_cache *s);
-
int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);
@@ -574,16 +570,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return -1;
}
-static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
- struct kmem_cache *s, struct kmem_cache *root_cache)
-{
- return 0;
-}
-
-static inline void memcg_free_cache_params(struct kmem_cache *s)
-{
-}
-
static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 28928ce..865e87c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2984,43 +2984,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
return 0;
}
-int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
- struct kmem_cache *root_cache)
-{
- size_t size;
-
- if (!memcg_kmem_enabled())
- return 0;
-
- if (!memcg) {
- size = offsetof(struct memcg_cache_params, memcg_caches);
- size += memcg_limited_groups_array_size * sizeof(void *);
- } else
- size = sizeof(struct memcg_cache_params);
-
- s->memcg_params = kzalloc(size, GFP_KERNEL);
- if (!s->memcg_params)
- return -ENOMEM;
-
- if (memcg) {
- s->memcg_params->memcg = memcg;
- s->memcg_params->root_cache = root_cache;
- css_get(&memcg->css);
- } else
- s->memcg_params->is_root_cache = true;
-
- return 0;
-}
-
-void memcg_free_cache_params(struct kmem_cache *s)
-{
- if (!s->memcg_params)
- return;
- if (!s->memcg_params->is_root_cache)
- css_put(&s->memcg_params->memcg->css);
- kfree(s->memcg_params);
-}
-
static void memcg_register_cache(struct mem_cgroup *memcg,
struct kmem_cache *root_cache)
{
@@ -3051,6 +3014,7 @@ static void memcg_register_cache(struct mem_cgroup *memcg,
if (!cachep)
return;
+ css_get(&memcg->css);
list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches);
/*
@@ -3084,6 +3048,9 @@ static void memcg_unregister_cache(struct kmem_cache *cachep)
list_del(&cachep->memcg_params->list);
kmem_cache_destroy(cachep);
+
+ /* drop the reference taken in memcg_register_cache */
+ css_put(&memcg->css);
}
/*
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f206cb1..c2a8661 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -116,6 +116,38 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size)
#endif
#ifdef CONFIG_MEMCG_KMEM
+static int memcg_alloc_cache_params(struct mem_cgroup *memcg,
+ struct kmem_cache *s, struct kmem_cache *root_cache)
+{
+ size_t size;
+
+ if (!memcg_kmem_enabled())
+ return 0;
+
+ if (!memcg) {
+ size = offsetof(struct memcg_cache_params, memcg_caches);
+ size += memcg_limited_groups_array_size * sizeof(void *);
+ } else
+ size = sizeof(struct memcg_cache_params);
+
+ s->memcg_params = kzalloc(size, GFP_KERNEL);
+ if (!s->memcg_params)
+ return -ENOMEM;
+
+ if (memcg) {
+ s->memcg_params->memcg = memcg;
+ s->memcg_params->root_cache = root_cache;
+ } else
+ s->memcg_params->is_root_cache = true;
+
+ return 0;
+}
+
+static void memcg_free_cache_params(struct kmem_cache *s)
+{
+ kfree(s->memcg_params);
+}
+
int memcg_update_all_caches(int num_memcgs)
{
struct kmem_cache *s;
@@ -141,7 +173,17 @@ out:
mutex_unlock(&slab_mutex);
return ret;
}
-#endif
+#else
+static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
+ struct kmem_cache *s, struct kmem_cache *root_cache)
+{
+ return 0;
+}
+
+static inline void memcg_free_cache_params(struct kmem_cache *s)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
/*
* Find a mergeable slab cache