mm: introduce free_pages_prepare()
free_hot_cold_page() and __free_pages_ok() have very similar freeing preparation. Consolidate them.

[akpm@linux-foundation.org: fix busted coding style]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 5f53e76299
Commit: ec95f53aa6
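For orientation, here is the consolidated shape the patch produces, condensed from the hunks below into one sketch. Every statement is taken from the diff itself; elided kernel context is marked with comments, and the comments are added here as a reading aid, not part of the commit:

/* After this patch, both free paths funnel through one helper.
 * free_pages_prepare() performs the tracing, kmemcheck and per-page
 * checks that the two callers used to duplicate, and returns false
 * when free_pages_check() flags a bad page, so the free is aborted.
 */
static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free_direct(page, order);
	kmemcheck_free_shadow(page, order);

	for (i = 0; i < (1 << order); i++) {
		struct page *pg = page + i;

		if (PageAnon(pg))
			pg->mapping = NULL;	/* moved here from free_hot_cold_page() */
		bad += free_pages_check(pg);
	}
	if (bad)
		return false;

	/* ... debug checks, arch_free_page(), kernel_map_pages() ... */
	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int wasMlocked = __TestClearPageMlocked(page);

	if (!free_pages_prepare(page, order))
		return;
	/* ... buddy free under local_irq_save(), unchanged ... */
}

void free_hot_cold_page(struct page *page, int cold)
{
	/* ... */
	if (!free_pages_prepare(page, 0))	/* order-0 fast path */
		return;
	/* ... per-cpu list free, unchanged ... */
}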
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -620,20 +620,23 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	spin_unlock(&zone->lock);
 }
 
-static void __free_pages_ok(struct page *page, unsigned int order)
+static bool free_pages_prepare(struct page *page, unsigned int order)
 {
-	unsigned long flags;
 	int i;
 	int bad = 0;
-	int wasMlocked = __TestClearPageMlocked(page);
 
 	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
-	for (i = 0 ; i < (1 << order) ; ++i)
-		bad += free_pages_check(page + i);
+	for (i = 0; i < (1 << order); i++) {
+		struct page *pg = page + i;
+
+		if (PageAnon(pg))
+			pg->mapping = NULL;
+		bad += free_pages_check(pg);
+	}
 	if (bad)
-		return;
+		return false;
 
 	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
@@ -643,6 +646,17 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
 
+	return true;
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+	unsigned long flags;
+	int wasMlocked = __TestClearPageMlocked(page);
+
+	if (!free_pages_prepare(page, order))
+		return;
+
 	local_irq_save(flags);
 	if (unlikely(wasMlocked))
 		free_page_mlock(page);
@@ -1128,21 +1142,9 @@ void free_hot_cold_page(struct page *page, int cold)
 	int migratetype;
 	int wasMlocked = __TestClearPageMlocked(page);
 
-	trace_mm_page_free_direct(page, 0);
-	kmemcheck_free_shadow(page, 0);
-
-	if (PageAnon(page))
-		page->mapping = NULL;
-	if (free_pages_check(page))
+	if (!free_pages_prepare(page, 0))
 		return;
 
-	if (!PageHighMem(page)) {
-		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
-	}
-	arch_free_page(page, 0);
-	kernel_map_pages(page, 1, 0);
-
 	migratetype = get_pageblock_migratetype(page);
 	set_page_private(page, migratetype);
 	local_irq_save(flags);
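Two details worth noting from the consolidation: the PageAnon() mapping clear, which free_hot_cold_page() previously applied only to its single page, now runs inside the per-page loop of free_pages_prepare(), so __free_pages_ok() picks it up for every page of a high-order block as well; and free_hot_cold_page() keeps the same behaviour while dropping its open-coded debug checks, arch_free_page() and kernel_map_pages() calls, which it now reaches through the shared helper.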