diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53b8201b31eb..9715c0c491b0 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3214,43 +3214,6 @@ void mem_cgroup_destroy_cache(struct kmem_cache *cachep)
 	schedule_work(&cachep->memcg_params->destroy);
 }
 
-static char *memcg_cache_name(struct mem_cgroup *memcg, struct kmem_cache *s)
-{
-	char *name;
-	struct dentry *dentry;
-
-	rcu_read_lock();
-	dentry = rcu_dereference(memcg->css.cgroup->dentry);
-	rcu_read_unlock();
-
-	BUG_ON(dentry == NULL);
-
-	name = kasprintf(GFP_KERNEL, "%s(%d:%s)", s->name,
-			 memcg_cache_id(memcg), dentry->d_name.name);
-
-	return name;
-}
-
-static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
-					 struct kmem_cache *s)
-{
-	char *name;
-	struct kmem_cache *new;
-
-	name = memcg_cache_name(memcg, s);
-	if (!name)
-		return NULL;
-
-	new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
-				      (s->flags & ~SLAB_PANIC), s->ctor, s);
-
-	if (new)
-		new->allocflags |= __GFP_KMEMCG;
-
-	kfree(name);
-	return new;
-}
-
 /*
  * This lock protects updaters, not readers. We want readers to be as fast as
  * they can, and they will either see NULL or a valid cache value. Our model
@@ -3260,6 +3223,44 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
  * will span more than one worker. Only one of them can create the cache.
  */
 static DEFINE_MUTEX(memcg_cache_mutex);
+
+/*
+ * Called with memcg_cache_mutex held.
+ */
+static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
+					 struct kmem_cache *s)
+{
+	struct kmem_cache *new;
+	static char *tmp_name = NULL;
+
+	lockdep_assert_held(&memcg_cache_mutex);
+
+	/*
+	 * kmem_cache_create_memcg duplicates the given name and
+	 * cgroup_name for this name requires RCU context.
+	 * This static temporary buffer is used to avoid a
+	 * pointless short-lived allocation.
+	 */
+	if (!tmp_name) {
+		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+		if (!tmp_name)
+			return NULL;
+	}
+
+	rcu_read_lock();
+	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
+		 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
+	rcu_read_unlock();
+
+	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
+				      (s->flags & ~SLAB_PANIC), s->ctor, s);
+
+	if (new)
+		new->allocflags |= __GFP_KMEMCG;
+
+	return new;
+}
+
 static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 						 struct kmem_cache *cachep)
 {