mm: switch s_mem and slab_cache in struct page
This will allow us to store slub's counters in the same bits as slab's s_mem. slub now needs to set page->mapping to NULL as it frees the page, just like slab does. Link: http://lkml.kernel.org/r/20180518194519.3820-5-willy@infradead.org Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com> Acked-by: Christoph Lameter <cl@linux.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Jérôme Glisse <jglisse@redhat.com> Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com> Cc: Lai Jiangshan <jiangshanlai@gmail.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: Randy Dunlap <rdunlap@infradead.org> Cc: Andrey Ryabinin <aryabinin@virtuozzo.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent
1d40a5ea01
Commit
d4fc5069a3
|
@ -83,7 +83,7 @@ struct page {
|
|||
/* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
|
||||
struct address_space *mapping;
|
||||
|
||||
void *s_mem; /* slab first object */
|
||||
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
|
||||
atomic_t compound_mapcount; /* first tail page */
|
||||
/* page_deferred_list().next -- second tail page */
|
||||
};
|
||||
|
@ -194,7 +194,7 @@ struct page {
|
|||
spinlock_t ptl;
|
||||
#endif
|
||||
#endif
|
||||
struct kmem_cache *slab_cache; /* SL[AU]B: Pointer to slab */
|
||||
void *s_mem; /* slab first object */
|
||||
};
|
||||
|
||||
#ifdef CONFIG_MEMCG
|
||||
|
|
|
@ -1695,6 +1695,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
|
|||
__ClearPageSlab(page);
|
||||
|
||||
page_mapcount_reset(page);
|
||||
page->mapping = NULL;
|
||||
if (current->reclaim_state)
|
||||
current->reclaim_state->reclaimed_slab += pages;
|
||||
memcg_uncharge_slab(page, order, s);
|
||||
|
|
Loading…
Link in new issue