mm/slub: Convert __free_slab() to use struct slab
__free_slab() is on the boundary of distinguishing struct slab and struct page, so start with struct slab but convert to folio for working with flags, and use folio_page() to call functions that require struct page.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Parent: 45387b8c14
Commit: 4020b4a226
mm/slub.c | 27
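As the commit message notes, struct slab, struct folio and struct page are different typed views of the same underlying memory, so the conversion in the diff below is mostly a change of view: start from struct slab, cast to folio for flag handling, and use folio_page() where a struct page is still required. What follows is a minimal userspace sketch of that relationship; the struct layouts and helper bodies are simplified stand-ins, not the real kernel definitions, and are only meant to show the idiom.

/*
 * Simplified stand-ins: in the kernel, struct slab and struct folio
 * overlay the memmap entry of the allocation's head page, so the
 * conversions are type reinterpretations rather than copies.
 */
#include <stdio.h>

struct page  { unsigned long flags; void *mapping; };
struct folio { struct page page; };   /* head-page view */
struct slab  { struct page page; };   /* slab-allocator view */

/* analogue of the kernel's slab_folio(): reinterpret the same memory */
static struct folio *slab_folio(struct slab *slab)
{
        return (struct folio *)slab;
}

/* analogue of folio_page(): the n-th constituent page of the folio */
static struct page *folio_page(struct folio *folio, unsigned int n)
{
        return &folio->page + n;
}

int main(void)
{
        struct slab slab = { .page = { .flags = 0 } };
        struct folio *folio = slab_folio(&slab);

        /* all three views point at the same storage */
        printf("slab=%p folio=%p page=%p\n",
               (void *)&slab, (void *)folio, (void *)folio_page(folio, 0));
        return 0;
}

The real kernel helpers are more careful (const-correctness, compound-page arithmetic), but the free path in this patch only needs the head page, which is why the new code passes folio_page(folio, 0) to functions that still take a struct page.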
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2005,35 +2005,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 			flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 }
 
-static void __free_slab(struct kmem_cache *s, struct page *page)
+static void __free_slab(struct kmem_cache *s, struct slab *slab)
 {
-	int order = compound_order(page);
+	struct folio *folio = slab_folio(slab);
+	int order = folio_order(folio);
 	int pages = 1 << order;
 
 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
 		void *p;
 
-		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page),
-						page->objects)
-			check_object(s, page, p, SLUB_RED_INACTIVE);
+		slab_pad_check(s, folio_page(folio, 0));
+		for_each_object(p, s, slab_address(slab), slab->objects)
+			check_object(s, folio_page(folio, 0), p, SLUB_RED_INACTIVE);
 	}
 
-	__ClearPageSlabPfmemalloc(page);
-	__ClearPageSlab(page);
-	/* In union with page->mapping where page allocator expects NULL */
-	page->slab_cache = NULL;
+	__slab_clear_pfmemalloc(slab);
+	__folio_clear_slab(folio);
+	folio->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	unaccount_slab(page_slab(page), order, s);
-	__free_pages(page, order);
+	unaccount_slab(slab, order, s);
+	__free_pages(folio_page(folio, 0), order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page = container_of(h, struct page, rcu_head);
 
-	__free_slab(page->slab_cache, page);
+	__free_slab(page->slab_cache, page_slab(page));
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
@@ -2041,7 +2040,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
 		call_rcu(&page->rcu_head, rcu_free_slab);
 	} else
-		__free_slab(s, page);
+		__free_slab(s, page_slab(page));
 }
 
 static void discard_slab(struct kmem_cache *s, struct page *page)