mm: don't assume anonymous pages have SwapBacked flag
There are a few places where the code assumes anonymous pages should have
the SwapBacked flag set.  MADV_FREE pages are anonymous pages, but we are
going to add them to the LRU_INACTIVE_FILE list and clear the SwapBacked
flag for them.  The assumption no longer holds, so fix those places.

Link: http://lkml.kernel.org/r/3945232c0df3dd6c4ef001976f35a95f18dcb407.1487965799.git.shli@fb.com
Signed-off-by: Shaohua Li <shli@fb.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: a128ca71fb
Commit: d44d363f65
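For context, the invariant the series moves to can be summed up in a minimal
sketch (the helper below is illustrative only, not something this patch adds):
once MADV_FREE pages sit on the file LRU with SwapBacked cleared, "anonymous"
no longer implies "swap backed", which is exactly what the hunks below stop
asserting.

/*
 * Illustrative sketch only -- not part of this patch.  After the
 * MADV_FREE rework, a lazily freeable page is anonymous (PageAnon)
 * but not swap backed (PageSwapBacked is clear), so the old
 * "anon implies swap backed" assertions would fire on it.
 */
static inline bool page_is_lazyfree_sketch(struct page *page)
{
	return PageAnon(page) && !PageSwapBacked(page);
}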
mm/huge_memory.c:

@@ -2399,7 +2399,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
 	VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
 	if (PageAnon(head)) {
mm/khugepaged.c:

@@ -483,8 +483,7 @@ void __khugepaged_exit(struct mm_struct *mm)
 
 static void release_pte_page(struct page *page)
 {
-	/* 0 stands for page_is_file_cache(page) == false */
-	dec_node_page_state(page, NR_ISOLATED_ANON + 0);
+	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
 	unlock_page(page);
 	putback_lru_page(page);
 }
@@ -532,7 +531,6 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 
 		VM_BUG_ON_PAGE(PageCompound(page), page);
 		VM_BUG_ON_PAGE(!PageAnon(page), page);
-		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
 
 		/*
 		 * We can do it before isolate_lru_page because the
@@ -579,8 +577,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
 			result = SCAN_DEL_PAGE_LRU;
 			goto out;
 		}
-		/* 0 stands for page_is_file_cache(page) == false */
-		inc_node_page_state(page, NR_ISOLATED_ANON + 0);
+		inc_node_page_state(page,
+				NR_ISOLATED_ANON + page_is_file_cache(page));
 		VM_BUG_ON_PAGE(!PageLocked(page), page);
 		VM_BUG_ON_PAGE(PageLRU(page), page);
 
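The two NR_ISOLATED accounting hunks above work because page_is_file_cache()
is simply the inverse of the SwapBacked flag (its contemporary definition,
from include/linux/mm_inline.h, shown here for reference), so a lazyfree page
with SwapBacked cleared is automatically accounted as an isolated file page:

/* include/linux/mm_inline.h, at the time of this patch */
static inline int page_is_file_cache(struct page *page)
{
	return !PageSwapBacked(page);
}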
mm/migrate.c:

@@ -1944,7 +1944,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 
 	/* Prepare a page as a migration target */
 	__SetPageLocked(new_page);
-	__SetPageSwapBacked(new_page);
+	if (PageSwapBacked(page))
+		__SetPageSwapBacked(new_page);
 
 	/* anon mapping, we can simply copy page->mapping to the new page: */
 	new_page->mapping = page->mapping;
mm/rmap.c:

@@ -1424,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * Store the swap location in the pte.
 			 * See handle_pte_fault() ...
 			 */
-			VM_BUG_ON_PAGE(!PageSwapCache(page), page);
+			VM_BUG_ON_PAGE(!PageSwapCache(page) && PageSwapBacked(page),
+				page);
 
 			if (!PageDirty(page)) {
 				/* It's a freeable page by MADV_FREE */
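The rmap.c hunk is what lets a clean lazyfree page be unmapped without a swap
entry.  As a usage illustration only (ordinary madvise(2) usage, not code from
this patch), this is how userspace marks anonymous memory lazily freeable:

#define _DEFAULT_SOURCE		/* for MADV_FREE/MAP_ANONYMOUS on glibc */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16 * 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0xaa, len);		/* dirty the anonymous pages */

	/*
	 * Mark the range lazily freeable: under memory pressure the
	 * kernel may discard these pages instead of swapping them out.
	 */
	if (madvise(buf, len, MADV_FREE) != 0)
		return 1;

	buf[0] = 1;	/* writing to a page cancels MADV_FREE for it */

	munmap(buf, len);
	return 0;
}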