#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>

typedef struct page *new_page_t(struct page *, unsigned long private, int **);
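/*
 * Illustrative sketch only, not part of this header: a new_page_t callback
 * hands back a freshly allocated destination page for each page to be
 * migrated.  The function name and its use of 'private' as a target node id
 * are hypothetical; the 'result' argument is left unused here:
 *
 *	static struct page *new_node_page(struct page *page,
 *					  unsigned long private, int **result)
 *	{
 *		return alloc_pages_node((int)private,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 */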

/*
 * Return values from address_space_operations.migratepage():
 *   - negative errno on page migration failure;
 *   - zero on page migration success;
 *
 * The balloon page migration introduces this special case where a 'distinct'
 * return code is used to flag a successful page migration to unmap_and_move().
 * This approach is necessary because page migration can race against the
 * balloon deflation procedure, and for such a case we could introduce a nasty
 * page leak if a successfully migrated balloon page gets released concurrently
 * with migration's unmap_and_move() wrap-up steps.
 */
#define MIGRATEPAGE_SUCCESS		0
#define MIGRATEPAGE_BALLOON_SUCCESS	1 /* special ret code for balloon page
					   * successful migration case.
					   */
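/*
 * Illustrative sketch only: an address_space that keeps no private state on
 * its pages can implement the migratepage() method by forwarding to
 * migrate_page(), which returns MIGRATEPAGE_SUCCESS or a negative errno.
 * The "foofs" names are hypothetical:
 *
 *	static int foofs_migratepage(struct address_space *mapping,
 *				     struct page *newpage, struct page *page,
 *				     enum migrate_mode mode)
 *	{
 *		return migrate_page(mapping, newpage, page, mode);
 *	}
 *
 *	static const struct address_space_operations foofs_aops = {
 *		.migratepage	= foofs_migratepage,
 *	};
 */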

enum migrate_reason {
	MR_COMPACTION,
	MR_MEMORY_FAILURE,
	MR_MEMORY_HOTPLUG,
	MR_SYSCALL,		/* also applies to cpusets */
	MR_MEMPOLICY_MBIND,
	MR_NUMA_MISPLACED,
	MR_CMA
};

#ifdef CONFIG_MIGRATION

extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
			struct page *, struct page *, enum migrate_mode);

/*
 * ksm: memory hotremove migration only
 *
 * Page migration of ksm pages quickly gets into trouble: the ksm page lock is
 * used to lock operations on its stable_node, but page migration switches the
 * page whose lock is to be used for that.  Another layer of locking would fix
 * it, but is not yet needed.
 *
 * Memory hotremove does need to migrate ksm pages when offlining sections of
 * memory: since ksm pages are no longer allocated with GFP_HIGHUSER, they tend
 * to be GFP_HIGHUSER_MOVABLE candidates for migration.  KSM is currently
 * unconscious of NUMA issues, happily merging pages from different NUMA nodes
 * (so MADV_MERGEABLE should not be used where NUMA placement matters), and
 * NUMA page migration of ksm pages therefore does not make sense yet.
 *
 * To make hotremove safe, ksm_memory_callback() takes ksm_thread_mutex on
 * MEM_GOING_OFFLINE and releases it on MEM_OFFLINE or MEM_CANCEL_OFFLINE.
 * But if mapped pages are freed before migration reaches them, stable_nodes
 * may be left pointing to struct pages which have been removed from the
 * system: the stable_node therefore identifies a page by pfn rather than by
 * page pointer, so such entries can be safely pruned on MEM_OFFLINE.  NUMA
 * migration skips PageKsm pages where it skips PageReserved.  Only when
 * unmap_and_move() takes the page lock can we be sure that the raised page
 * count has prevented a PageAnon from being upgraded: hence the 'offlining'
 * argument to migrate_pages(), which migrates ksm pages when offlining (which
 * has sufficient locking) but rejects them otherwise.
 */
extern int migrate_pages(struct list_head *l, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode, int reason);
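/*
 * Illustrative sketch only: callers gather pages on a private list, then hand
 * the list to migrate_pages() together with an allocation callback, the
 * desired migration mode and a reason, and put back anything that could not
 * be migrated.  'pagelist', 'new_node_page' and 'target_nid' are hypothetical;
 * an offlining-style caller passes offlining == true so that ksm pages may be
 * migrated as described above:
 *
 *	int err = migrate_pages(&pagelist, new_node_page, target_nid,
 *				true, MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *	if (err)
 *		putback_lru_pages(&pagelist);
 */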
extern int migrate_huge_page(struct page *, new_page_t x,
			unsigned long private, bool offlining,
			enum migrate_mode mode);

extern int fail_migrate_page(struct address_space *,
			struct page *, struct page *);

extern int migrate_prep(void);
extern int migrate_prep_local(void);
extern int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);

#else

static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode, int reason) { return -ENOSYS; }
static inline int migrate_huge_page(struct page *page, new_page_t x,
		unsigned long private, bool offlining,
		enum migrate_mode mode) { return -ENOSYS; }

static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }

static inline int migrate_vmas(struct mm_struct *mm,
		const nodemask_t *from, const nodemask_t *to,
		unsigned long flags)
{
	return -ENOSYS;
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

/* Possible settings for the migrate_page() method in address_space_operations */
#define migrate_page NULL
#define fail_migrate_page NULL

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page, int node);
extern bool migrate_ratelimited(int node);
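/*
 * Illustrative sketch only: a NUMA fault handler could consult
 * migrate_ratelimited() for the destination node and, when not rate limited,
 * try to move the misplaced page there.  Variable names are hypothetical:
 *
 *	if (!migrate_ratelimited(target_nid))
 *		migrated = migrate_misplaced_page(page, target_nid);
 */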
#else
static inline int migrate_misplaced_page(struct page *page, int node)
{
	return -EAGAIN; /* can't migrate now */
}
static inline bool migrate_ratelimited(int node)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
extern int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node);
#else
static inline int migrate_misplaced_transhuge_page(struct mm_struct *mm,
			struct vm_area_struct *vma,
			pmd_t *pmd, pmd_t entry,
			unsigned long address,
			struct page *page, int node)
{
	return -EAGAIN;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_MIGRATE_H */