mm, migration: add destination page freeing callback

Memory migration uses a callback defined by the caller to determine how to
allocate destination pages.  When migration fails for a source page,
however, it frees the destination page back to the system.

This patch adds a memory migration callback defined by the caller to
determine how to free destination pages.  If a caller, such as memory
compaction, builds its own freelist for migration targets, this can reuse
already freed memory instead of scanning additional memory.

If the caller provides a function to handle freeing of destination pages,
it is called when page migration fails.  If the caller passes NULL then
freeing back to the system will be handled as usual.  This patch
introduces no functional change.
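
For illustration only (this patch itself passes NULL at every call site): a later caller that wants to recycle failed destination pages into a private freelist could wire up the two callbacks roughly as below. The names my_freelist, my_alloc_target and my_free_target are hypothetical and are not part of this patch.

    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/migrate.h>
    #include <linux/mm.h>

    /* Hypothetical caller-private pool of spare destination pages. */
    static LIST_HEAD(my_freelist);

    /* new_page_t: prefer a recycled page, fall back to the allocator. */
    static struct page *my_alloc_target(struct page *page, unsigned long private,
                                        int **result)
    {
            struct page *dst;

            if (!list_empty(&my_freelist)) {
                    dst = list_first_entry(&my_freelist, struct page, lru);
                    list_del(&dst->lru);
                    return dst;
            }
            return alloc_page(GFP_HIGHUSER_MOVABLE);
    }

    /* free_page_t: keep an unused destination page for the next attempt. */
    static void my_free_target(struct page *page, unsigned long private)
    {
            list_add(&page->lru, &my_freelist);
    }

Such a caller would then invoke migrate_pages(&pagelist, my_alloc_target, my_free_target, 0, MIGRATE_SYNC, reason); passing NULL instead of my_free_target keeps the existing behaviour of freeing unused destination pages back to the page allocator.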

Signed-off-by: David Rientjes <rientjes@google.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
David Rientjes 2014-06-04 16:08:25 -07:00, committed by Linus Torvalds
Parent 93f39eea9c
Commit 68711a7463
7 changed files with 53 additions and 27 deletions

include/linux/migrate.h

@@ -5,7 +5,9 @@
 #include <linux/mempolicy.h>
 #include <linux/migrate_mode.h>
-typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+typedef struct page *new_page_t(struct page *page, unsigned long private,
+				int **reason);
+typedef void free_page_t(struct page *page, unsigned long private);
 /*
  * Return values from addresss_space_operations.migratepage():
@@ -38,7 +40,7 @@ enum migrate_reason {
 extern void putback_movable_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *, enum migrate_mode);
-extern int migrate_pages(struct list_head *l, new_page_t x,
+extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 		unsigned long private, enum migrate_mode mode, int reason);
 extern int migrate_prep(void);
@@ -56,8 +58,9 @@ extern int migrate_page_move_mapping(struct address_space *mapping,
 #else
 static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, enum migrate_mode mode, int reason)
+static inline int migrate_pages(struct list_head *l, new_page_t new,
+		free_page_t free, unsigned long private, enum migrate_mode mode,
+		int reason)
 	{ return -ENOSYS; }
 static inline int migrate_prep(void) { return -ENOSYS; }

mm/compaction.c

@@ -1016,7 +1016,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		}
 		nr_migrate = cc->nr_migratepages;
-		err = migrate_pages(&cc->migratepages, compaction_alloc,
+		err = migrate_pages(&cc->migratepages, compaction_alloc, NULL,
 				(unsigned long)cc,
 				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
 				MR_COMPACTION);

mm/memory-failure.c

@@ -1503,7 +1503,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	/* Keep page count to indicate a given hugepage is isolated. */
 	list_move(&hpage->lru, &pagelist);
-	ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+	ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
 				MIGRATE_SYNC, MR_MEMORY_FAILURE);
 	if (ret) {
 		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
@@ -1584,7 +1584,7 @@ static int __soft_offline_page(struct page *page, int flags)
 		inc_zone_page_state(page, NR_ISOLATED_ANON +
 					page_is_file_cache(page));
 		list_add(&page->lru, &pagelist);
-		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+		ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
 					MIGRATE_SYNC, MR_MEMORY_FAILURE);
 		if (ret) {
 			if (!list_empty(&pagelist)) {

mm/memory_hotplug.c

@@ -1394,7 +1394,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 		 * alloc_migrate_target should be improooooved!!
 		 * migrate_pages returns # of failed pages.
 		 */
-		ret = migrate_pages(&source, alloc_migrate_target, 0,
+		ret = migrate_pages(&source, alloc_migrate_target, NULL, 0,
 					MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 		if (ret)
 			putback_movable_pages(&source);

mm/mempolicy.c

@@ -1028,7 +1028,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, new_node_page, dest,
+		err = migrate_pages(&pagelist, new_node_page, NULL, dest,
 					MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
 			putback_movable_pages(&pagelist);
@@ -1277,7 +1277,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 		if (!list_empty(&pagelist)) {
 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
 			nr_failed = migrate_pages(&pagelist, new_vma_page,
-					(unsigned long)vma,
+					NULL, (unsigned long)vma,
 					MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
 			if (nr_failed)
 				putback_movable_pages(&pagelist);

mm/migrate.c

@@ -938,8 +938,9 @@ out:
  * Obtain the lock on page, remove all ptes and migrate the page
  * to the newly allocated page in newpage.
  */
-static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, enum migrate_mode mode)
+static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
+			unsigned long private, struct page *page, int force,
+			enum migrate_mode mode)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -983,11 +984,17 @@ out:
 				page_is_file_cache(page));
 		putback_lru_page(page);
 	}
 	/*
-	 * Move the new page to the LRU. If migration was not successful
-	 * then this will free the page.
+	 * If migration was not successful and there's a freeing callback, use
+	 * it. Otherwise, putback_lru_page() will drop the reference grabbed
+	 * during isolation.
 	 */
-	putback_lru_page(newpage);
+	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+		put_new_page(newpage, private);
+	else
+		putback_lru_page(newpage);
 	if (result) {
 		if (rc)
 			*result = rc;
@@ -1016,8 +1023,9 @@ out:
  * will wait in the page fault for migration to complete.
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
-				unsigned long private, struct page *hpage,
-				int force, enum migrate_mode mode)
+				free_page_t put_new_page, unsigned long private,
+				struct page *hpage, int force,
+				enum migrate_mode mode)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -1056,20 +1064,30 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (!page_mapped(hpage))
 		rc = move_to_new_page(new_hpage, hpage, 1, mode);
-	if (rc)
+	if (rc != MIGRATEPAGE_SUCCESS)
 		remove_migration_ptes(hpage, hpage);
 	if (anon_vma)
 		put_anon_vma(anon_vma);
-	if (!rc)
+	if (rc == MIGRATEPAGE_SUCCESS)
 		hugetlb_cgroup_migrate(hpage, new_hpage);
 	unlock_page(hpage);
out:
 	if (rc != -EAGAIN)
 		putback_active_hugepage(hpage);
-	put_page(new_hpage);
+	/*
+	 * If migration was not successful and there's a freeing callback, use
+	 * it. Otherwise, put_page() will drop the reference grabbed during
+	 * isolation.
+	 */
+	if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+		put_new_page(new_hpage, private);
+	else
+		put_page(new_hpage);
 	if (result) {
 		if (rc)
 			*result = rc;
@@ -1086,6 +1104,8 @@ out:
  * @from:		The list of pages to be migrated.
  * @get_new_page:	The function used to allocate free pages to be used
  *			as the target of the page migration.
+ * @put_new_page:	The function used to free target pages if migration
+ *			fails, or NULL if no special handling is necessary.
  * @private:		Private data to be passed on to get_new_page()
  * @mode:		The migration mode that specifies the constraints for
  *			page migration, if any.
@@ -1099,7 +1119,8 @@ out:
  * Returns the number of pages that were not migrated, or an error code.
  */
 int migrate_pages(struct list_head *from, new_page_t get_new_page,
-		unsigned long private, enum migrate_mode mode, int reason)
+		free_page_t put_new_page, unsigned long private,
+		enum migrate_mode mode, int reason)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -1121,10 +1142,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 			if (PageHuge(page))
 				rc = unmap_and_move_huge_page(get_new_page,
-						private, page, pass > 2, mode);
+						put_new_page, private, page,
+						pass > 2, mode);
 			else
-				rc = unmap_and_move(get_new_page, private,
-						page, pass > 2, mode);
+				rc = unmap_and_move(get_new_page, put_new_page,
+						private, page, pass > 2, mode);
 			switch(rc) {
 			case -ENOMEM:
@@ -1273,7 +1295,7 @@ set_status:
 	err = 0;
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, new_page_node,
+		err = migrate_pages(&pagelist, new_page_node, NULL,
 				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
 		if (err)
 			putback_movable_pages(&pagelist);
@@ -1729,7 +1751,8 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
 	list_add(&page->lru, &migratepages);
 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
-				     node, MIGRATE_ASYNC, MR_NUMA_MISPLACED);
+				     NULL, node, MIGRATE_ASYNC,
+				     MR_NUMA_MISPLACED);
 	if (nr_remaining) {
 		if (!list_empty(&migratepages)) {
 			list_del(&page->lru);

mm/page_alloc.c

@@ -6218,7 +6218,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 		cc->nr_migratepages -= nr_reclaimed;
 		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
-				    0, MIGRATE_SYNC, MR_CMA);
+				    NULL, 0, MIGRATE_SYNC, MR_CMA);
 	}
 	if (ret < 0) {
 		putback_movable_pages(&cc->migratepages);