slub: move discard_slab out of node lock
Lockdep reports there is a potential deadlock on the slub node list_lock. discard_slab() is called with the lock held in unfreeze_partials(), which could trigger a slab allocation, which could take the lock again. discard_slab() doesn't actually need to hold the lock, provided the slab has already been removed from the partial list. Acked-by: Christoph Lameter <cl@linux.com> Reported-and-tested-by: Yong Zhang <yong.zhang0@gmail.com> Reported-and-tested-by: Julie Sullivan <kernelmail.jms@gmail.com> Signed-off-by: Shaohua Li <shaohua.li@intel.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
This commit is contained in:
Parent
f64ae042d9
Commit
9ada19342b
16
mm/slub.c
16
mm/slub.c
|
@ -1862,7 +1862,7 @@ static void unfreeze_partials(struct kmem_cache *s)
|
|||
{
|
||||
struct kmem_cache_node *n = NULL;
|
||||
struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
|
||||
struct page *page;
|
||||
struct page *page, *discard_page = NULL;
|
||||
|
||||
while ((page = c->partial)) {
|
||||
enum slab_modes { M_PARTIAL, M_FREE };
|
||||
|
@ -1916,14 +1916,22 @@ static void unfreeze_partials(struct kmem_cache *s)
|
|||
"unfreezing slab"));
|
||||
|
||||
if (m == M_FREE) {
|
||||
stat(s, DEACTIVATE_EMPTY);
|
||||
discard_slab(s, page);
|
||||
stat(s, FREE_SLAB);
|
||||
page->next = discard_page;
|
||||
discard_page = page;
|
||||
}
|
||||
}
|
||||
|
||||
if (n)
|
||||
spin_unlock(&n->list_lock);
|
||||
|
||||
while (discard_page) {
|
||||
page = discard_page;
|
||||
discard_page = discard_page->next;
|
||||
|
||||
stat(s, DEACTIVATE_EMPTY);
|
||||
discard_slab(s, page);
|
||||
stat(s, FREE_SLAB);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in new issue