sl[au]b: allocate objects from memcg cache
We are able to match a cache allocation to a particular memcg. If the task doesn't change groups during the allocation itself (a rare event), this gives us a good picture of which group is the first to touch a cache page.

This patch uses the now-available infrastructure by calling memcg_kmem_get_cache() before all the cache allocations.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: b9ce5ef49f
Commit: d79923fad9
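The change is easiest to see as a pattern: every slab/slub allocation entry point now passes its cache through a hook, memcg_kmem_get_cache(), which may hand back a per-memcg clone of that cache, while page-backed allocations that bypass the caches are tagged with __GFP_KMEMCG instead. Below is a minimal userspace C sketch of that interposition pattern, not kernel code: cache_get, current_group, and per_group are hypothetical stand-ins for memcg_kmem_get_cache(), the running task's memcg, and the per-memcg cache array.

#include <stddef.h>
#include <stdlib.h>

#define MAX_GROUPS 8

struct cache {
	size_t object_size;
	/* one clone per group, standing in for the per-memcg cache array */
	struct cache *per_group[MAX_GROUPS];
};

static int current_group;	/* stand-in for the running task's memcg */

/* Hook run before every allocation, like memcg_kmem_get_cache(). */
static struct cache *cache_get(struct cache *c)
{
	struct cache *clone = c->per_group[current_group];

	return clone ? clone : c;	/* no clone yet: use the global cache */
}

static void *cache_alloc(struct cache *c)
{
	/* the substitution happens before the actual allocation ... */
	c = cache_get(c);
	/* ... so every page this cache grabs is owned by the caller's group */
	return malloc(c->object_size);
}

int main(void)
{
	struct cache kmalloc_64 = { .object_size = 64 };
	void *obj = cache_alloc(&kmalloc_64);	/* "charged" to group 0 */

	free(obj);
	return 0;
}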
@@ -225,7 +225,10 @@ void *__kmalloc(size_t size, gfp_t flags);
 static __always_inline void *
 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	void *ret;
+
+	flags |= (__GFP_COMP | __GFP_KMEMCG);
+	ret = (void *) __get_free_pages(flags, order);
 	kmemleak_alloc(ret, size, 1, flags);
 	return ret;
 }
@@ -3086,6 +3086,9 @@ static struct kmem_cache *kmem_cache_dup(struct mem_cgroup *memcg,
 	new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor);
 
+	if (new)
+		new->allocflags |= __GFP_KMEMCG;
+
 	kfree(name);
 	return new;
 }
@@ -1933,7 +1933,7 @@ static void kmem_freepages(struct kmem_cache *cachep, void *addr)
 	}
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += nr_freed;
-	free_pages((unsigned long)addr, cachep->gfporder);
+	free_memcg_kmem_pages((unsigned long)addr, cachep->gfporder);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -3486,6 +3486,8 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
+	cachep = memcg_kmem_get_cache(cachep, flags);
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
@@ -3571,6 +3573,8 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
+	cachep = memcg_kmem_get_cache(cachep, flags);
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
@@ -1405,7 +1405,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	reset_page_mapcount(page);
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	__free_pages(page, order);
+	__free_memcg_kmem_pages(page, order);
 }
 
 #define need_reserve_slab_rcu \
@@ -2323,6 +2323,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	if (slab_pre_alloc_hook(s, gfpflags))
 		return NULL;
 
+	s = memcg_kmem_get_cache(s, gfpflags);
 redo:
 
 	/*
@@ -3284,7 +3285,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 	struct page *page;
 	void *ptr = NULL;
 
-	flags |= __GFP_COMP | __GFP_NOTRACK;
+	flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
 	page = alloc_pages_node(node, flags, get_order(size));
 	if (page)
 		ptr = page_address(page);
@@ -3390,7 +3391,7 @@ void kfree(const void *x)
 	if (unlikely(!PageSlab(page))) {
 		BUG_ON(!PageCompound(page));
 		kmemleak_free(x);
-		__free_pages(page, compound_order(page));
+		__free_memcg_kmem_pages(page, compound_order(page));
 		return;
 	}
 	slab_free(page->slab_cache, page, object, _RET_IP_);
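The free-path hunks above (free_memcg_kmem_pages() in place of free_pages(), __free_memcg_kmem_pages() in place of __free_pages()) are the other half of the __GFP_KMEMCG tagging: a page charged to a group at allocation time must be uncharged when the slab returns it. A hedged sketch of why the two paths have to stay paired, with a plain counter standing in for the memcg charge; note that the kernel records the owning group per page, whereas this sketch passes flags to the free path only for brevity.

#include <stdlib.h>

#define GFP_TRACKED 0x1		/* stand-in for __GFP_KMEMCG */

static long group_charge;	/* stand-in for the group's kmem counter */

static void *tracked_alloc(size_t size, int flags)
{
	if (flags & GFP_TRACKED)
		group_charge += size;	/* charge at allocation */
	return malloc(size);
}

/* Freeing through the plain path would leak the charge forever. */
static void tracked_free(void *p, size_t size, int flags)
{
	if (flags & GFP_TRACKED)
		group_charge -= size;	/* uncharge on free */
	free(p);
}

int main(void)
{
	void *p = tracked_alloc(4096, GFP_TRACKED);

	tracked_free(p, 4096, GFP_TRACKED);
	return group_charge != 0;	/* 0: charge and uncharge balanced */
}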