slub: clean up code for kmem cgroup support to kmem_cache_free_bulk
This change is primarily an attempt to make it easier to realize the
optimizations the compiler performs in case CONFIG_MEMCG_KMEM is not
enabled.

Performance wise, even when CONFIG_MEMCG_KMEM is compiled in, the
overhead is zero. This is because, as long as no process has enabled
kmem cgroups accounting, the assignment is replaced by asm-NOP
operations. This is possible because memcg_kmem_enabled() uses a
static_key_false() construct.

It also helps readability, as it avoids accessing the p[] array like
p[size - 1], which "exposes" that the array is processed backwards
inside the helper function build_detached_freelist().

Lastly, this also makes the code more robust in error cases, such as
NULL pointers in the array, which were handled before commit
033745189b ("slub: add missing kmem cgroup support to
kmem_cache_free_bulk") but not after it.

Fixes: 033745189b ("slub: add missing kmem cgroup support to kmem_cache_free_bulk")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: dec63a4dec
Commit: 376bf125ac

 mm/slub.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2815,6 +2815,7 @@ struct detached_freelist {
 	void *tail;
 	void *freelist;
 	int cnt;
+	struct kmem_cache *s;
 };
 
 /*
@@ -2829,8 +2830,9 @@ struct detached_freelist {
  * synchronization primitive. Look ahead in the array is limited due
  * to performance reasons.
  */
-static int build_detached_freelist(struct kmem_cache *s, size_t size,
-				   void **p, struct detached_freelist *df)
+static inline
+int build_detached_freelist(struct kmem_cache *s, size_t size,
+			    void **p, struct detached_freelist *df)
 {
 	size_t first_skipped_index = 0;
 	int lookahead = 3;
@@ -2846,8 +2848,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!object)
 		return 0;
 
+	/* Support for memcg, compiler can optimize this out */
+	df->s = cache_from_obj(s, object);
+
 	/* Start new detached freelist */
-	set_freepointer(s, object, NULL);
+	set_freepointer(df->s, object, NULL);
 	df->page = virt_to_head_page(object);
 	df->tail = object;
 	df->freelist = object;
@@ -2862,7 +2867,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
 	/* df->page is always set at this point */
 	if (df->page == virt_to_head_page(object)) {
 		/* Opportunity build freelist */
-		set_freepointer(s, object, df->freelist);
+		set_freepointer(df->s, object, df->freelist);
 		df->freelist = object;
 		df->cnt++;
 		p[size] = NULL; /* mark object processed */
@@ -2881,25 +2886,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
 	return first_skipped_index;
 }
 
-
 /* Note that interrupts must be enabled when calling this function. */
-void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
 {
 	if (WARN_ON(!size))
 		return;
 
 	do {
 		struct detached_freelist df;
-		struct kmem_cache *s;
-
-		/* Support for memcg */
-		s = cache_from_obj(orig_s, p[size - 1]);
 
 		size = build_detached_freelist(s, size, p, &df);
 		if (unlikely(!df.page))
 			continue;
 
-		slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
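[Editor's note: for context only, not part of this commit, a caller pairs the
bulk APIs roughly as sketched below. 'demo_cache' is a hypothetical cache
created elsewhere with kmem_cache_create(), and the return-value check assumes
kmem_cache_alloc_bulk() returns 0 on failure.]

	void *objs[16];

	if (kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL,
				  ARRAY_SIZE(objs), objs)) {
		/* ... use the 16 objects ... */

		/*
		 * Interrupts must be enabled here. The function walks
		 * objs[] from the end (hence the old p[size - 1]
		 * access), grouping objects that share a page into one
		 * detached freelist and freeing each group with a
		 * single slab_free() call.
		 */
		kmem_cache_free_bulk(demo_cache, ARRAY_SIZE(objs), objs);
	}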