path: root/mm/memcontrol.c
author    Vladimir Davydov <vdavydov@parallels.com>    2014-04-07 15:39:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-04-07 16:36:12 -0700
commit    5722d094ad2b56fa2c1cb3adaf40071a55bbf242 (patch)
tree      fbae9cdc8bf4d92d2ac268cf761b5f25c5ad6ef9 /mm/memcontrol.c
parent    a44cb9449182fd7b25bf5f1cc38b7f19e0b96f6d (diff)
memcg, slab: cleanup memcg cache creation
This patch cleans up the memcg cache creation path as follows:

 - Move memcg cache name creation to a separate function to be called
   from kmem_cache_create_memcg(). This allows us to get rid of the
   mutex protecting the temporary buffer used for the name formatting,
   because the whole cache creation path is protected by the slab_mutex.

 - Get rid of memcg_create_kmem_cache(). This function serves as a
   proxy to kmem_cache_create_memcg(). After separating the cache name
   creation path, it would be reduced to a function call, so let's
   inline it.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
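The key trick in the patch is reusing a lock the caller already holds (the slab_mutex) to serialize access to a lazily allocated static scratch buffer, instead of giving the buffer a dedicated mutex. Below is a minimal userspace sketch of that pattern, not kernel code: create_cache_name(), SCRATCH_LEN and cache_mutex are hypothetical stand-ins for memcg_create_cache_name(), NAME_MAX and slab_mutex, and a plain assert() stands in for lockdep_assert_held().

#define _GNU_SOURCE              /* for asprintf() */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define SCRATCH_LEN 255          /* stand-in for NAME_MAX */

/* stand-in for slab_mutex: taken by the whole "cache creation path" */
static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold cache_mutex; the shared scratch buffer needs no lock of its own. */
static char *create_cache_name(const char *root_name, int id, const char *cgroup_name)
{
	static char *buf;        /* lazily allocated, reused by every caller */
	char *name;

	/* crude stand-in for lockdep_assert_held(): trips if nobody holds the lock */
	assert(pthread_mutex_trylock(&cache_mutex) != 0);

	if (!buf) {
		buf = malloc(SCRATCH_LEN + 1);
		if (!buf)
			return NULL;
	}

	/*
	 * In the kernel, cgroup_name() must copy into a caller-supplied buffer,
	 * which is what the shared scratch buffer is for; emulate that here.
	 */
	snprintf(buf, SCRATCH_LEN + 1, "%s", cgroup_name);

	/* build the final "<root>(<id>:<cgroup>)" string on the heap */
	if (asprintf(&name, "%s(%d:%s)", root_name, id, buf) < 0)
		return NULL;
	return name;
}

int main(void)
{
	pthread_mutex_lock(&cache_mutex);
	char *name = create_cache_name("dentry", 3, "mygroup");
	pthread_mutex_unlock(&cache_mutex);

	if (name) {
		puts(name);      /* prints: dentry(3:mygroup) */
		free(name);
	}
	return 0;
}

As in the patch, the formatted name comes back as a freshly allocated string (kasprintf() in the kernel), so the caller, presumably kmem_cache_create_memcg() in mm/slab_common.c, owns it and must free it; only the scratch buffer is shared and kept around for reuse.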
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 89
1 file changed, 39 insertions(+), 50 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e33b1d0..32c7342 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3094,6 +3094,29 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups)
return 0;
}
+char *memcg_create_cache_name(struct mem_cgroup *memcg,
+ struct kmem_cache *root_cache)
+{
+ static char *buf = NULL;
+
+ /*
+ * We need a mutex here to protect the shared buffer. Since this is
+ * expected to be called only on cache creation, we can employ the
+ * slab_mutex for that purpose.
+ */
+ lockdep_assert_held(&slab_mutex);
+
+ if (!buf) {
+ buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+ if (!buf)
+ return NULL;
+ }
+
+ cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1);
+ return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
+ memcg_cache_id(memcg), buf);
+}
+
int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
struct kmem_cache *root_cache)
{
@@ -3298,46 +3321,6 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
schedule_work(&cachep->memcg_params->destroy);
}
-static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
- struct kmem_cache *s)
-{
- struct kmem_cache *new = NULL;
- static char *tmp_path = NULL, *tmp_name = NULL;
- static DEFINE_MUTEX(mutex); /* protects tmp_name */
-
- BUG_ON(!memcg_can_account_kmem(memcg));
-
- mutex_lock(&mutex);
- /*
- * kmem_cache_create_memcg duplicates the given name and
- * cgroup_name for this name requires RCU context.
- * This static temporary buffer is used to prevent from
- * pointless shortliving allocation.
- */
- if (!tmp_path || !tmp_name) {
- if (!tmp_path)
- tmp_path = kmalloc(PATH_MAX, GFP_KERNEL);
- if (!tmp_name)
- tmp_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
- if (!tmp_path || !tmp_name)
- goto out;
- }
-
- cgroup_name(memcg->css.cgroup, tmp_name, NAME_MAX + 1);
- snprintf(tmp_path, PATH_MAX, "%s(%d:%s)", s->name,
- memcg_cache_id(memcg), tmp_name);
-
- new = kmem_cache_create_memcg(memcg, tmp_path, s->object_size, s->align,
- (s->flags & ~SLAB_PANIC), s->ctor, s);
- if (new)
- new->allocflags |= __GFP_KMEMCG;
- else
- new = s;
-out:
- mutex_unlock(&mutex);
- return new;
-}
-
void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
struct kmem_cache *c;
@@ -3384,12 +3367,6 @@ void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
mutex_unlock(&activate_kmem_mutex);
}
-struct create_work {
- struct mem_cgroup *memcg;
- struct kmem_cache *cachep;
- struct work_struct work;
-};
-
static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
{
struct kmem_cache *cachep;
@@ -3407,13 +3384,25 @@ static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg)
mutex_unlock(&memcg->slab_caches_mutex);
}
+struct create_work {
+ struct mem_cgroup *memcg;
+ struct kmem_cache *cachep;
+ struct work_struct work;
+};
+
static void memcg_create_cache_work_func(struct work_struct *w)
{
- struct create_work *cw;
+ struct create_work *cw = container_of(w, struct create_work, work);
+ struct mem_cgroup *memcg = cw->memcg;
+ struct kmem_cache *cachep = cw->cachep;
+ struct kmem_cache *new;
- cw = container_of(w, struct create_work, work);
- memcg_create_kmem_cache(cw->memcg, cw->cachep);
- css_put(&cw->memcg->css);
+ new = kmem_cache_create_memcg(memcg, cachep->name,
+ cachep->object_size, cachep->align,
+ cachep->flags & ~SLAB_PANIC, cachep->ctor, cachep);
+ if (new)
+ new->allocflags |= __GFP_KMEMCG;
+ css_put(&memcg->css);
kfree(cw);
}
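For context, the reworked memcg_create_cache_work_func() above leans on the kernel's usual deferred-work idiom: the request (struct create_work) embeds the work item, and the handler recovers the enclosing request from the embedded member with container_of(), then frees it. Here is a minimal userspace sketch of that idiom, with hypothetical stand-ins for work_struct and for the workqueue that would normally run the handler asynchronously via schedule_work().

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {                 /* stand-in for the kernel's work_struct */
	void (*func)(struct work_struct *w);
};

struct create_work {                 /* mirrors the struct moved by the patch */
	const char *memcg_name;
	const char *cache_name;
	struct work_struct work;
};

static void create_cache_work_func(struct work_struct *w)
{
	/* recover the enclosing request from the embedded work member */
	struct create_work *cw = container_of(w, struct create_work, work);

	printf("creating cache %s for cgroup %s\n",
	       cw->cache_name, cw->memcg_name);
	free(cw);                    /* the handler owns and frees the request */
}

int main(void)
{
	struct create_work *cw = malloc(sizeof(*cw));

	if (!cw)
		return 1;
	cw->memcg_name = "mygroup";
	cw->cache_name = "dentry";
	cw->work.func = create_cache_work_func;

	/* a real workqueue would run this asynchronously; call it directly here */
	cw->work.func(&cw->work);
	return 0;
}

Embedding the work item inside the request is what lets a single work-callback signature carry arbitrary per-request state without any global lookup, which is why the patch can inline the cache creation directly into the work function.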