slub: explicitly document position of inserting slab to partial list
Adding a slab to the head or tail of the partial list is performance-sensitive. So explicitly use DEACTIVATE_TO_TAIL/DEACTIVATE_TO_HEAD to document it, to avoid getting it wrong. Acked-by: Christoph Lameter <cl@linux.com> Signed-off-by: Shaohua Li <shli@kernel.org> Signed-off-by: Shaohua Li <shaohua.li@intel.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
This commit is contained in:
Parent
130655ef09
Commit
136333d104
12
mm/slub.c
12
mm/slub.c
|
@ -1534,7 +1534,7 @@ static inline void add_partial(struct kmem_cache_node *n,
|
||||||
struct page *page, int tail)
|
struct page *page, int tail)
|
||||||
{
|
{
|
||||||
n->nr_partial++;
|
n->nr_partial++;
|
||||||
if (tail)
|
if (tail == DEACTIVATE_TO_TAIL)
|
||||||
list_add_tail(&page->lru, &n->partial);
|
list_add_tail(&page->lru, &n->partial);
|
||||||
else
|
else
|
||||||
list_add(&page->lru, &n->partial);
|
list_add(&page->lru, &n->partial);
|
||||||
|
@ -1781,13 +1781,13 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
|
||||||
enum slab_modes l = M_NONE, m = M_NONE;
|
enum slab_modes l = M_NONE, m = M_NONE;
|
||||||
void *freelist;
|
void *freelist;
|
||||||
void *nextfree;
|
void *nextfree;
|
||||||
int tail = 0;
|
int tail = DEACTIVATE_TO_HEAD;
|
||||||
struct page new;
|
struct page new;
|
||||||
struct page old;
|
struct page old;
|
||||||
|
|
||||||
if (page->freelist) {
|
if (page->freelist) {
|
||||||
stat(s, DEACTIVATE_REMOTE_FREES);
|
stat(s, DEACTIVATE_REMOTE_FREES);
|
||||||
tail = 1;
|
tail = DEACTIVATE_TO_TAIL;
|
||||||
}
|
}
|
||||||
|
|
||||||
c->tid = next_tid(c->tid);
|
c->tid = next_tid(c->tid);
|
||||||
|
@ -1893,7 +1893,7 @@ redo:
|
||||||
if (m == M_PARTIAL) {
|
if (m == M_PARTIAL) {
|
||||||
|
|
||||||
add_partial(n, page, tail);
|
add_partial(n, page, tail);
|
||||||
stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
|
stat(s, tail);
|
||||||
|
|
||||||
} else if (m == M_FULL) {
|
} else if (m == M_FULL) {
|
||||||
|
|
||||||
|
@ -2377,7 +2377,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
|
||||||
*/
|
*/
|
||||||
if (unlikely(!prior)) {
|
if (unlikely(!prior)) {
|
||||||
remove_full(s, page);
|
remove_full(s, page);
|
||||||
add_partial(n, page, 1);
|
add_partial(n, page, DEACTIVATE_TO_TAIL);
|
||||||
stat(s, FREE_ADD_PARTIAL);
|
stat(s, FREE_ADD_PARTIAL);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -2695,7 +2695,7 @@ static void early_kmem_cache_node_alloc(int node)
|
||||||
init_kmem_cache_node(n, kmem_cache_node);
|
init_kmem_cache_node(n, kmem_cache_node);
|
||||||
inc_slabs_node(kmem_cache_node, node, page->objects);
|
inc_slabs_node(kmem_cache_node, node, page->objects);
|
||||||
|
|
||||||
add_partial(n, page, 0);
|
add_partial(n, page, DEACTIVATE_TO_HEAD);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void free_kmem_cache_nodes(struct kmem_cache *s)
|
static void free_kmem_cache_nodes(struct kmem_cache *s)
|
||||||
|
|
Loading…
Reference in new issue