mm, slub: detach whole partial list at once in unfreeze_partials()
Instead of iterating through the live percpu partial list, detach it from the kmem_cache_cpu at once. This is simpler and will allow further optimization.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
Parent
8de06a6f48
Commit
c2f973ba42
10
mm/slub.c
10
mm/slub.c
|
@ -2358,16 +2358,20 @@ static void unfreeze_partials(struct kmem_cache *s,
|
|||
{
|
||||
#ifdef CONFIG_SLUB_CPU_PARTIAL
|
||||
struct kmem_cache_node *n = NULL, *n2 = NULL;
|
||||
struct page *page, *discard_page = NULL;
|
||||
struct page *page, *partial_page, *discard_page = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
while ((page = slub_percpu_partial(c))) {
|
||||
partial_page = slub_percpu_partial(c);
|
||||
c->partial = NULL;
|
||||
|
||||
while (partial_page) {
|
||||
struct page new;
|
||||
struct page old;
|
||||
|
||||
slub_set_percpu_partial(c, page);
|
||||
page = partial_page;
|
||||
partial_page = page->next;
|
||||
|
||||
n2 = get_node(s, page_to_nid(page));
|
||||
if (n != n2) {
|
||||
|
|
Loading…
Permalink