mm, debug_pagealloc: use a page type instead of page_ext flag
When debug_pagealloc is enabled, we currently allocate the page_ext array to mark guard pages with the PAGE_EXT_DEBUG_GUARD flag. Now that we have the page_type field in struct page, we can use that instead, as guard pages are neither PageSlab nor mapped to userspace. This reduces memory overhead when debug_pagealloc is enabled and there are no other features requiring the page_ext array. Link: http://lkml.kernel.org/r/20190603143451.27353-4-vbabka@suse.cz Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Michal Hocko <mhocko@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent
4462b32c92
Commit
3972f6bb1c
|
@@ -805,12 +805,10 @@
|
|||
tracking down these problems.
|
||||
|
||||
debug_pagealloc=
|
||||
[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
|
||||
parameter enables the feature at boot time. In
|
||||
default, it is disabled. We can avoid allocating huge
|
||||
chunk of memory for debug pagealloc if we don't enable
|
||||
it at boot time and the system will work mostly same
|
||||
with the kernel built without CONFIG_DEBUG_PAGEALLOC.
|
||||
[KNL] When CONFIG_DEBUG_PAGEALLOC is set, this parameter
|
||||
enables the feature at boot time. By default, it is
|
||||
disabled and the system will work mostly the same as a
|
||||
kernel built without CONFIG_DEBUG_PAGEALLOC.
|
||||
on: enable the feature
|
||||
|
||||
debugpat [X86] Enable PAT debugging
|
||||
|
|
|
@@ -2862,8 +2862,6 @@ extern long copy_huge_page_from_user(struct page *dst_page,
|
|||
bool allow_pagefault);
|
||||
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
|
||||
|
||||
extern struct page_ext_operations debug_guardpage_ops;
|
||||
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
extern unsigned int _debug_guardpage_minorder;
|
||||
DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
|
||||
|
@@ -2880,16 +2878,10 @@ static inline bool debug_guardpage_enabled(void)
|
|||
|
||||
static inline bool page_is_guard(struct page *page)
|
||||
{
|
||||
struct page_ext *page_ext;
|
||||
|
||||
if (!debug_guardpage_enabled())
|
||||
return false;
|
||||
|
||||
page_ext = lookup_page_ext(page);
|
||||
if (unlikely(!page_ext))
|
||||
return false;
|
||||
|
||||
return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
|
||||
return PageGuard(page);
|
||||
}
|
||||
#else
|
||||
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
|
||||
|
|
|
@@ -703,6 +703,7 @@ PAGEFLAG_FALSE(DoubleMap)
|
|||
#define PG_offline 0x00000100
|
||||
#define PG_kmemcg 0x00000200
|
||||
#define PG_table 0x00000400
|
||||
#define PG_guard 0x00000800
|
||||
|
||||
#define PageType(page, flag) \
|
||||
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
|
||||
|
@@ -754,6 +755,11 @@ PAGE_TYPE_OPS(Kmemcg, kmemcg)
|
|||
*/
|
||||
PAGE_TYPE_OPS(Table, table)
|
||||
|
||||
/*
|
||||
* Marks guardpages used with debug_pagealloc.
|
||||
*/
|
||||
PAGE_TYPE_OPS(Guard, guard)
|
||||
|
||||
extern bool is_free_buddy_page(struct page *page);
|
||||
|
||||
__PAGEFLAG(Isolated, isolated, PF_ANY);
|
||||
|
|
|
@@ -17,7 +17,6 @@ struct page_ext_operations {
|
|||
#ifdef CONFIG_PAGE_EXTENSION
|
||||
|
||||
enum page_ext_flags {
|
||||
PAGE_EXT_DEBUG_GUARD,
|
||||
PAGE_EXT_OWNER,
|
||||
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
|
||||
PAGE_EXT_YOUNG,
|
||||
|
|
|
@@ -12,7 +12,6 @@ config DEBUG_PAGEALLOC
|
|||
bool "Debug page memory allocations"
|
||||
depends on DEBUG_KERNEL
|
||||
depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
|
||||
select PAGE_EXTENSION
|
||||
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
|
||||
---help---
|
||||
Unmap pages from the kernel linear mapping after free_pages().
|
||||
|
|
|
@@ -50,7 +50,6 @@
|
|||
#include <linux/backing-dev.h>
|
||||
#include <linux/fault-inject.h>
|
||||
#include <linux/page-isolation.h>
|
||||
#include <linux/page_ext.h>
|
||||
#include <linux/debugobjects.h>
|
||||
#include <linux/kmemleak.h>
|
||||
#include <linux/compaction.h>
|
||||
|
@@ -668,18 +667,6 @@ static int __init early_debug_pagealloc(char *buf)
|
|||
}
|
||||
early_param("debug_pagealloc", early_debug_pagealloc);
|
||||
|
||||
static bool need_debug_guardpage(void)
|
||||
{
|
||||
/* If we don't use debug_pagealloc, we don't need guard page */
|
||||
if (!debug_pagealloc_enabled())
|
||||
return false;
|
||||
|
||||
if (!debug_guardpage_minorder())
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static void init_debug_guardpage(void)
|
||||
{
|
||||
if (!debug_pagealloc_enabled())
|
||||
|
@@ -691,11 +678,6 @@ static void init_debug_guardpage(void)
|
|||
static_branch_enable(&_debug_guardpage_enabled);
|
||||
}
|
||||
|
||||
struct page_ext_operations debug_guardpage_ops = {
|
||||
.need = need_debug_guardpage,
|
||||
.init = init_debug_guardpage,
|
||||
};
|
||||
|
||||
static int __init debug_guardpage_minorder_setup(char *buf)
|
||||
{
|
||||
unsigned long res;
|
||||
|
@@ -713,20 +695,13 @@ early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);
|
|||
static inline bool set_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype)
|
||||
{
|
||||
struct page_ext *page_ext;
|
||||
|
||||
if (!debug_guardpage_enabled())
|
||||
return false;
|
||||
|
||||
if (order >= debug_guardpage_minorder())
|
||||
return false;
|
||||
|
||||
page_ext = lookup_page_ext(page);
|
||||
if (unlikely(!page_ext))
|
||||
return false;
|
||||
|
||||
__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
|
||||
|
||||
__SetPageGuard(page);
|
||||
INIT_LIST_HEAD(&page->lru);
|
||||
set_page_private(page, order);
|
||||
/* Guard pages are not available for any usage */
|
||||
|
@@ -738,23 +713,16 @@ static inline bool set_page_guard(struct zone *zone, struct page *page,
|
|||
static inline void clear_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype)
|
||||
{
|
||||
struct page_ext *page_ext;
|
||||
|
||||
if (!debug_guardpage_enabled())
|
||||
return;
|
||||
|
||||
page_ext = lookup_page_ext(page);
|
||||
if (unlikely(!page_ext))
|
||||
return;
|
||||
|
||||
__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
|
||||
__ClearPageGuard(page);
|
||||
|
||||
set_page_private(page, 0);
|
||||
if (!is_migrate_isolate(migratetype))
|
||||
__mod_zone_freepage_state(zone, (1 << order), migratetype);
|
||||
}
|
||||
#else
|
||||
struct page_ext_operations debug_guardpage_ops;
|
||||
static inline bool set_page_guard(struct zone *zone, struct page *page,
|
||||
unsigned int order, int migratetype) { return false; }
|
||||
static inline void clear_page_guard(struct zone *zone, struct page *page,
|
||||
|
@@ -1930,6 +1898,10 @@ void __init page_alloc_init_late(void)
|
|||
|
||||
for_each_populated_zone(zone)
|
||||
set_zone_contiguous(zone);
|
||||
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
init_debug_guardpage();
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CMA
|
||||
|
|
|
@@ -59,9 +59,6 @@
|
|||
*/
|
||||
|
||||
static struct page_ext_operations *page_ext_ops[] = {
|
||||
#ifdef CONFIG_DEBUG_PAGEALLOC
|
||||
&debug_guardpage_ops,
|
||||
#endif
|
||||
#ifdef CONFIG_PAGE_OWNER
|
||||
&page_owner_ops,
|
||||
#endif
|
||||
|
|
Loading…
Reference in new issue