mm/gup: use a standard migration target allocation callback

There is a well-defined migration target allocation callback. Use it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Roman Gushchin <guro@fb.com>
Link: http://lkml.kernel.org/r/1596180906-8442-3-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Joonsoo Kim, 2020-08-11 18:37:41 -07:00; committed by Linus Torvalds
Parent: bbe88753bd
Commit: ed03d92458
1 changed file, 6 additions and 48 deletions
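
Before the diff, a quick look at the plumbing being adopted: migrate_pages() takes an allocation callback plus an opaque "private" value, and the standard callback decodes that value as a pointer to a small control struct. Roughly, as of this series (field set paraphrased from include/linux/migrate.h and mm/internal.h; treat this as a sketch, not verbatim source):

/* The callback contract migrate_pages() expects (include/linux/migrate.h). */
typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

/* The control block the standard callback unpacks from 'private'
 * (mm/internal.h, paraphrased). */
struct migration_target_control {
	int nid;		/* preferred node; NUMA_NO_NODE = node of source page */
	nodemask_t *nmask;	/* optional nodemask constraining fallback nodes */
	gfp_t gfp_mask;		/* base GFP flags for the target allocation */
};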


@@ -1609,52 +1609,6 @@ static bool check_dax_vmas(struct vm_area_struct **vmas, long nr_pages)
 }
 
 #ifdef CONFIG_CMA
-static struct page *new_non_cma_page(struct page *page, unsigned long private)
-{
-	/*
-	 * We want to make sure we allocate the new page from the same node
-	 * as the source page.
-	 */
-	int nid = page_to_nid(page);
-	/*
-	 * Trying to allocate a page for migration. Ignore allocation
-	 * failure warnings. We don't force __GFP_THISNODE here because
-	 * this node here is the node where we have CMA reservation and
-	 * in some case these nodes will have really less non CMA
-	 * allocation memory.
-	 *
-	 * Note that CMA region is prohibited by allocation scope.
-	 */
-	gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN;
-
-	if (PageHighMem(page))
-		gfp_mask |= __GFP_HIGHMEM;
-
-#ifdef CONFIG_HUGETLB_PAGE
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(page);
-
-		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
-		return alloc_huge_page_nodemask(h, nid, NULL, gfp_mask);
-	}
-#endif
-	if (PageTransHuge(page)) {
-		struct page *thp;
-		/*
-		 * ignore allocation failure warnings
-		 */
-		gfp_t thp_gfpmask = GFP_TRANSHUGE | __GFP_NOWARN;
-
-		thp = __alloc_pages_node(nid, thp_gfpmask, HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
-
-	return __alloc_pages_node(nid, gfp_mask, 0);
-}
-
 static long check_and_migrate_cma_pages(struct task_struct *tsk,
 					struct mm_struct *mm,
 					unsigned long start,
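
For reference, the standard callback that replaces the hand-rolled allocator above is alloc_migration_target() in mm/migrate.c. A condensed sketch of its behavior at the time of this series (paraphrased, so details are approximate) shows it covers every case new_non_cma_page() handled by hand: node affinity, hugetlb, THP, and highmem:

struct page *alloc_migration_target(struct page *page, unsigned long private)
{
	struct migration_target_control *mtc;
	gfp_t gfp_mask;
	unsigned int order = 0;
	int nid;

	mtc = (struct migration_target_control *)private;
	gfp_mask = mtc->gfp_mask;
	nid = mtc->nid;
	if (nid == NUMA_NO_NODE)	/* fall back to the source page's node */
		nid = page_to_nid(page);

	if (PageHuge(page)) {
		/* hugetlb pages go through the hstate-aware allocator */
		struct hstate *h = page_hstate(compound_head(page));

		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
	}

	if (PageTransHuge(page)) {
		/* THP targets are allocated at PMD order */
		gfp_mask &= ~__GFP_RECLAIM;
		gfp_mask |= GFP_TRANSHUGE;
		order = HPAGE_PMD_ORDER;
	}

	if (PageHighMem(page))		/* simplified; the real check is zone-based */
		gfp_mask |= __GFP_HIGHMEM;

	page = __alloc_pages_nodemask(gfp_mask, order, nid, mtc->nmask);
	if (page && PageTransHuge(page))
		prep_transhuge_page(page);

	return page;
}
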
@@ -1669,6 +1623,10 @@ static long check_and_migrate_cma_pages(struct task_struct *tsk,
 	bool migrate_allow = true;
 	LIST_HEAD(cma_page_list);
 	long ret = nr_pages;
+	struct migration_target_control mtc = {
+		.nid = NUMA_NO_NODE,
+		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
+	};
 
 check_again:
 	for (i = 0; i < nr_pages;) {
@@ -1714,8 +1672,8 @@ check_again:
 		for (i = 0; i < nr_pages; i++)
 			put_page(pages[i]);
 
-		if (migrate_pages(&cma_page_list, new_non_cma_page,
-				  NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
+		if (migrate_pages(&cma_page_list, alloc_migration_target, NULL,
+				  (unsigned long)&mtc, MIGRATE_SYNC, MR_CONTIG_RANGE)) {
 			/*
 			 * some of the pages failed migration. Do get_user_pages
 			 * without migration.
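
A closing note on the two concerns the deleted comments raised. Node affinity is preserved: with .nid = NUMA_NO_NODE the callback falls back to page_to_nid() of the source page, still without forcing __GFP_THISNODE. And "CMA region is prohibited by allocation scope" continues to hold, because gup brackets the FOLL_LONGTERM path with the nocma allocation scope introduced earlier in this series, so the shared callback needs no CMA-specific logic of its own. The scope pattern, roughly as used around __gup_longterm_locked() at the time (call-site details approximate):

	unsigned int flags;

	flags = memalloc_nocma_save();	/* page allocations below avoid CMA pageblocks */
	/* ... __get_user_pages_locked() / check_and_migrate_cma_pages() ... */
	memalloc_nocma_restore(flags);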