mm/migrate: convert move_to_new_page() into move_to_new_folio()
Pass in the folios that we already have in each caller.  Saves a lot of
calls to compound_head().

Link: https://lkml.kernel.org/r/20220504182857.4013401-27-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Parent: 8b463be3a0
Commit: e7e3ffeb27
 mm/migrate.c | 58 ++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 29 insertions(+), 29 deletions(-)
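For background on the compound_head() savings the message mentions: a struct page pointer can refer to a tail page of a compound page, so page-based flag helpers have to resolve the head page on every test, while a struct folio is guaranteed never to point at a tail page. The sketch below is illustrative only (the sketch_* helper names are made up, and the real kernel flag accessors are generated by page-flags.h rather than written this way); it is not part of the patch.

/*
 * Illustrative sketch, not the real kernel macros: why folio-based flag
 * tests can skip the compound_head() lookup that page-based tests need.
 */
static inline bool sketch_page_locked(struct page *page)
{
	/* A page may be a tail page, so resolve the compound head first. */
	return test_bit(PG_locked, &compound_head(page)->flags);
}

static inline bool sketch_folio_locked(struct folio *folio)
{
	/* A folio is never a tail page, so its flags can be tested directly. */
	return test_bit(PG_locked, &folio->page.flags);
}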
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -843,21 +843,21 @@ static int fallback_migrate_page(struct address_space *mapping,
  * < 0 - error code
  *  MIGRATEPAGE_SUCCESS - success
  */
-static int move_to_new_page(struct page *newpage, struct page *page,
+static int move_to_new_folio(struct folio *dst, struct folio *src,
 				enum migrate_mode mode)
 {
 	struct address_space *mapping;
 	int rc = -EAGAIN;
-	bool is_lru = !__PageMovable(page);
+	bool is_lru = !__PageMovable(&src->page);
 
-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
+	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
+	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
 
-	mapping = page_mapping(page);
+	mapping = folio_mapping(src);
 
 	if (likely(is_lru)) {
 		if (!mapping)
-			rc = migrate_page(mapping, newpage, page, mode);
+			rc = migrate_page(mapping, &dst->page, &src->page, mode);
 		else if (mapping->a_ops->migratepage)
 			/*
 			 * Most pages have a mapping and most filesystems
@@ -866,54 +866,54 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 			 * migratepage callback. This is the most common path
 			 * for page migration.
 			 */
-			rc = mapping->a_ops->migratepage(mapping, newpage,
-							page, mode);
+			rc = mapping->a_ops->migratepage(mapping, &dst->page,
+							&src->page, mode);
 		else
-			rc = fallback_migrate_page(mapping, newpage,
-							page, mode);
+			rc = fallback_migrate_page(mapping, &dst->page,
+							&src->page, mode);
 	} else {
 		/*
 		 * In case of non-lru page, it could be released after
 		 * isolation step. In that case, we shouldn't try migration.
 		 */
-		VM_BUG_ON_PAGE(!PageIsolated(page), page);
-		if (!PageMovable(page)) {
+		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
+		if (!folio_test_movable(src)) {
 			rc = MIGRATEPAGE_SUCCESS;
-			ClearPageIsolated(page);
+			folio_clear_isolated(src);
 			goto out;
 		}
 
-		rc = mapping->a_ops->migratepage(mapping, newpage,
-						page, mode);
+		rc = mapping->a_ops->migratepage(mapping, &dst->page,
+						&src->page, mode);
 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
-				!PageIsolated(page));
+				!folio_test_isolated(src));
 	}
 
 	/*
-	 * When successful, old pagecache page->mapping must be cleared before
-	 * page is freed; but stats require that PageAnon be left as PageAnon.
+	 * When successful, old pagecache src->mapping must be cleared before
+	 * src is freed; but stats require that PageAnon be left as PageAnon.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
-		if (__PageMovable(page)) {
-			VM_BUG_ON_PAGE(!PageIsolated(page), page);
+		if (__PageMovable(&src->page)) {
+			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
 
 			/*
 			 * We clear PG_movable under page_lock so any compactor
 			 * cannot try to migrate this page.
 			 */
-			ClearPageIsolated(page);
+			folio_clear_isolated(src);
 		}
 
 		/*
-		 * Anonymous and movable page->mapping will be cleared by
+		 * Anonymous and movable src->mapping will be cleared by
 		 * free_pages_prepare so don't reset it here for keeping
 		 * the type to work PageAnon, for example.
 		 */
-		if (!PageMappingFlags(page))
-			page->mapping = NULL;
+		if (!folio_mapping_flags(src))
+			src->mapping = NULL;
 
-		if (likely(!is_zone_device_page(newpage)))
-			flush_dcache_folio(page_folio(newpage));
+		if (likely(!folio_is_zone_device(dst)))
+			flush_dcache_folio(dst);
 	}
 out:
 	return rc;
@@ -1001,7 +1001,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		goto out_unlock;
 
 	if (unlikely(!is_lru)) {
-		rc = move_to_new_page(newpage, page, mode);
+		rc = move_to_new_folio(dst, folio, mode);
 		goto out_unlock_both;
 	}
 
@@ -1032,7 +1032,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	}
 
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page, mode);
+		rc = move_to_new_folio(dst, folio, mode);
 
 	/*
 	 * When successful, push newpage to LRU immediately: so that if it
@@ -1261,7 +1261,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	}
 
 	if (!page_mapped(hpage))
-		rc = move_to_new_page(new_hpage, hpage, mode);
+		rc = move_to_new_folio(dst, src, mode);
 
 	if (page_was_mapped)
 		remove_migration_ptes(src,