mm/slub: Convert alloc_slab_page() to return a struct slab
Preparatory, callers convert back to struct page for now. Also move setting
page flags to alloc_slab_page() where we still operate on a struct page. This
means the page->slab_cache pointer is now set later than the PageSlab flag,
which could theoretically confuse some pfn walker assuming PageSlab means
there would be a valid cache pointer. But as the code had no barriers and
used __set_bit() anyway, it could have happened already, so there shouldn't
be such a walker.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
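For illustration of the ordering concern mentioned above, here is a hypothetical
pfn walker (not code from this patch; the function name is made up) that would
now have to tolerate observing PageSlab before slab_cache is assigned:

/*
 * Hypothetical walker -- illustrative only.  After this patch,
 * __folio_set_slab() in alloc_slab_page() makes PageSlab visible before
 * allocate_slab() assigns page->slab_cache, so the pointer may still be
 * NULL when the flag is already set.
 */
static void inspect_pfn(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PageSlab(page)) {
		struct kmem_cache *s = READ_ONCE(page->slab_cache);

		if (s)	/* may be NULL while the slab is being set up */
			pr_info("pfn %lu: slab cache %s\n", pfn, s->name);
	}
}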
Parent: fb012e278d
Commit: 45387b8c14

Changed files:
  mm/slub.c | 26
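The diff below leans on the struct slab conversion helpers added earlier in
this series. As a rough sketch (assuming the 5.17-era mm/slab.h, where the
upstream versions are _Generic-based macros that preserve constness), they
boil down to pointer casts between the aliasing struct slab, struct folio and
head struct page:

/* simplified sketch of the mm/slab.h helpers this patch relies on */
#define folio_slab(folio)	((struct slab *)(folio))
#define slab_folio(slab)	((struct folio *)(slab))
#define page_slab(page)		((struct slab *)(page))
#define slab_page(slab)		(folio_page(slab_folio(slab), 0))	/* head page */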
@@ -1788,18 +1788,27 @@ static void *setup_object(struct kmem_cache *s, struct page *page,
 /*
  * Slab allocation and freeing
  */
-static inline struct page *alloc_slab_page(struct kmem_cache *s,
+static inline struct slab *alloc_slab_page(struct kmem_cache *s,
 		gfp_t flags, int node, struct kmem_cache_order_objects oo)
 {
-	struct page *page;
+	struct folio *folio;
+	struct slab *slab;
 	unsigned int order = oo_order(oo);
 
 	if (node == NUMA_NO_NODE)
-		page = alloc_pages(flags, order);
+		folio = (struct folio *)alloc_pages(flags, order);
 	else
-		page = __alloc_pages_node(node, flags, order);
+		folio = (struct folio *)__alloc_pages_node(node, flags, order);
 
-	return page;
+	if (!folio)
+		return NULL;
+
+	slab = folio_slab(folio);
+	__folio_set_slab(folio);
+	if (page_is_pfmemalloc(folio_page(folio, 0)))
+		slab_set_pfmemalloc(slab);
+
+	return slab;
 }
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
@@ -1932,7 +1941,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
-	page = alloc_slab_page(s, alloc_gfp, node, oo);
+	page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
 	if (unlikely(!page)) {
 		oo = s->min;
 		alloc_gfp = flags;
@@ -1940,7 +1949,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		page = alloc_slab_page(s, alloc_gfp, node, oo);
+		page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
 		if (unlikely(!page))
 			goto out;
 		stat(s, ORDER_FALLBACK);
@@ -1951,9 +1960,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	account_slab(page_slab(page), oo_order(oo), s, flags);
 
 	page->slab_cache = s;
-	__SetPageSlab(page);
-	if (page_is_pfmemalloc(page))
-		SetPageSlabPfmemalloc(page);
 
 	kasan_poison_slab(page);
 
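Taken together, the caller side of allocate_slab() still operates on struct
page after this patch, converting back immediately. A condensed view of the
relevant lines (paraphrased from the hunks above, error paths trimmed):

	/* flag setting now happens inside alloc_slab_page() */
	page = slab_page(alloc_slab_page(s, alloc_gfp, node, oo));
	if (unlikely(!page))
		goto out;

	account_slab(page_slab(page), oo_order(oo), s, flags);

	page->slab_cache = s;	/* still set here, after PageSlab is already visible */

	kasan_poison_slab(page);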