mm: numa: Change page last {nid,pid} into {cpu,pid}
Change the per page last fault tracking to use cpu,pid instead of
nid,pid. This will allow us to try and lookup the alternate task more
easily. Note that even though it is the cpu that is stored in the page
flags, the mpol_misplaced decision is still based on the node.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1381141781-10992-43-git-send-email-mgorman@suse.de
[ Fixed build failure on 32-bit systems. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent: e1dda8a797
Commit: 90572890d2
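For reference, the new cpupid value packs the last faulting CPU into the
high bits and the low 8 bits of the faulting task's pid into the low bits.
Below is a minimal userspace sketch of that encoding, not kernel code: the
8-bit CPU field and the sample values are assumptions for illustration (in
the kernel the CPU field is NR_CPUS_BITS wide).

#include <assert.h>
#include <stdio.h>

#define LAST__PID_SHIFT 8
#define LAST__PID_MASK  ((1 << LAST__PID_SHIFT) - 1)
#define LAST__CPU_SHIFT 8	/* stands in for NR_CPUS_BITS */
#define LAST__CPU_MASK  ((1 << LAST__CPU_SHIFT) - 1)

static int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static int cpupid_to_pid(int cpupid) { return cpupid & LAST__PID_MASK; }
static int cpupid_to_cpu(int cpupid) { return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK; }

int main(void)
{
	int cpupid = cpu_pid_to_cpupid(3, 4242);

	/* Only the low 8 bits of the pid survive; that is by design. */
	assert(cpupid_to_cpu(cpupid) == 3);
	assert(cpupid_to_pid(cpupid) == (4242 & LAST__PID_MASK));
	printf("cpupid=%#x cpu=%d pid8=%d\n", cpupid,
	       cpupid_to_cpu(cpupid), cpupid_to_pid(cpupid));
	return 0;
}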
include/linux/mm.h

@@ -581,11 +581,11 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
  * sets it, so none of the operations on it need to be atomic.
  */
 
-/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_NIDPID] | ... | FLAGS | */
+/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
 #define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 #define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 #define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
-#define LAST_NIDPID_PGOFF	(ZONES_PGOFF - LAST_NIDPID_WIDTH)
+#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
 
 /*
  * Define the bit shifts to access each section. For non-existent
@@ -595,7 +595,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 #define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 #define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
-#define LAST_NIDPID_PGSHIFT	(LAST_NIDPID_PGOFF * (LAST_NIDPID_WIDTH != 0))
+#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
 
 /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 #ifdef NODE_NOT_IN_PAGE_FLAGS
@@ -617,7 +617,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 #define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 #define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
-#define LAST_NIDPID_MASK	((1UL << LAST_NIDPID_WIDTH) - 1)
+#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_WIDTH) - 1)
 #define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 
 static inline enum zone_type page_zonenum(const struct page *page)
@@ -661,96 +661,106 @@ static inline int page_to_nid(const struct page *page)
 #endif
 
 #ifdef CONFIG_NUMA_BALANCING
-static inline int nid_pid_to_nidpid(int nid, int pid)
+static inline int cpu_pid_to_cpupid(int cpu, int pid)
 {
-	return ((nid & LAST__NID_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
+	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
 }
 
-static inline int nidpid_to_pid(int nidpid)
+static inline int cpupid_to_pid(int cpupid)
 {
-	return nidpid & LAST__PID_MASK;
+	return cpupid & LAST__PID_MASK;
 }
 
-static inline int nidpid_to_nid(int nidpid)
+static inline int cpupid_to_cpu(int cpupid)
 {
-	return (nidpid >> LAST__PID_SHIFT) & LAST__NID_MASK;
+	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
 }
 
-static inline bool nidpid_pid_unset(int nidpid)
+static inline int cpupid_to_nid(int cpupid)
 {
-	return nidpid_to_pid(nidpid) == (-1 & LAST__PID_MASK);
+	return cpu_to_node(cpupid_to_cpu(cpupid));
 }
 
-static inline bool nidpid_nid_unset(int nidpid)
+static inline bool cpupid_pid_unset(int cpupid)
 {
-	return nidpid_to_nid(nidpid) == (-1 & LAST__NID_MASK);
+	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
 }
 
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
-static inline int page_nidpid_xchg_last(struct page *page, int nid)
+static inline bool cpupid_cpu_unset(int cpupid)
 {
-	return xchg(&page->_last_nidpid, nid);
+	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
 }
 
-static inline int page_nidpid_last(struct page *page)
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
-	return page->_last_nidpid;
+	return xchg(&page->_last_cpupid, cpupid);
 }
-static inline void page_nidpid_reset_last(struct page *page)
+
+static inline int page_cpupid_last(struct page *page)
+{
+	return page->_last_cpupid;
+}
+
+static inline void page_cpupid_reset_last(struct page *page)
 {
-	page->_last_nidpid = -1;
+	page->_last_cpupid = -1;
 }
 #else
-static inline int page_nidpid_last(struct page *page)
+static inline int page_cpupid_last(struct page *page)
 {
-	return (page->flags >> LAST_NIDPID_PGSHIFT) & LAST_NIDPID_MASK;
+	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 }
 
-extern int page_nidpid_xchg_last(struct page *page, int nidpid);
+extern int page_cpupid_xchg_last(struct page *page, int cpupid);
 
-static inline void page_nidpid_reset_last(struct page *page)
+static inline void page_cpupid_reset_last(struct page *page)
 {
-	int nidpid = (1 << LAST_NIDPID_SHIFT) - 1;
+	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
 
-	page->flags &= ~(LAST_NIDPID_MASK << LAST_NIDPID_PGSHIFT);
-	page->flags |= (nidpid & LAST_NIDPID_MASK) << LAST_NIDPID_PGSHIFT;
+	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
 }
-#endif /* LAST_NIDPID_NOT_IN_PAGE_FLAGS */
-#else
-static inline int page_nidpid_xchg_last(struct page *page, int nidpid)
+#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
+#else /* !CONFIG_NUMA_BALANCING */
+static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
-	return page_to_nid(page);
+	return page_to_nid(page); /* XXX */
 }
 
-static inline int page_nidpid_last(struct page *page)
+static inline int page_cpupid_last(struct page *page)
 {
-	return page_to_nid(page);
+	return page_to_nid(page); /* XXX */
 }
 
-static inline int nidpid_to_nid(int nidpid)
+static inline int cpupid_to_nid(int cpupid)
 {
 	return -1;
 }
 
-static inline int nidpid_to_pid(int nidpid)
+static inline int cpupid_to_pid(int cpupid)
 {
 	return -1;
 }
 
-static inline int nid_pid_to_nidpid(int nid, int pid)
+static inline int cpupid_to_cpu(int cpupid)
 {
 	return -1;
 }
 
-static inline bool nidpid_pid_unset(int nidpid)
+static inline int cpu_pid_to_cpupid(int nid, int pid)
+{
+	return -1;
+}
+
+static inline bool cpupid_pid_unset(int cpupid)
 {
 	return 1;
 }
 
-static inline void page_nidpid_reset_last(struct page *page)
+static inline void page_cpupid_reset_last(struct page *page)
 {
 }
-#endif
+#endif /* CONFIG_NUMA_BALANCING */
 
 static inline struct zone *page_zone(const struct page *page)
 {
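One detail of the helpers above worth spelling out: page_cpupid_reset_last()
stores -1, i.e. all ones in the field, and cpupid_pid_unset() detects that
state by comparing the pid bits against the truncated -1. A small standalone
sketch of the convention (field width as in the patch; the sample value is
made up):

#include <assert.h>

#define LAST__PID_SHIFT 8
#define LAST__PID_MASK ((1 << LAST__PID_SHIFT) - 1)

static int cpupid_to_pid(int cpupid) { return cpupid & LAST__PID_MASK; }

static int cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

int main(void)
{
	int cpupid = -1;			/* as after page_cpupid_reset_last() */

	assert(cpupid_pid_unset(cpupid));	/* reads back as "no fault recorded" */
	assert(!cpupid_pid_unset(0x0312));	/* a real cpu=3, pid-bits=0x12 value */
	return 0;
}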
include/linux/mm_types.h

@@ -174,8 +174,8 @@ struct page {
 	void *shadow;
 #endif
 
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
-	int _last_nidpid;
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+	int _last_cpupid;
 #endif
 }
 /*
include/linux/page-flags-layout.h

@@ -39,9 +39,9 @@
  * lookup is necessary.
  *
  * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
- * " plus space for last_nidpid:      | NODE | ZONE | LAST_NIDPID ... | FLAGS |
+ * " plus space for last_cpupid:      | NODE | ZONE | LAST_CPUPID ... | FLAGS |
  * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
- * " plus space for last_nidpid:      | SECTION | NODE | ZONE | LAST_NIDPID ... | FLAGS |
+ * " plus space for last_cpupid:      | SECTION | NODE | ZONE | LAST_CPUPID ... | FLAGS |
  * classic sparse no space for node:  | SECTION | ZONE | ... | FLAGS |
  */
 #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -65,18 +65,18 @@
 #define LAST__PID_SHIFT	8
 #define LAST__PID_MASK	((1 << LAST__PID_SHIFT)-1)
 
-#define LAST__NID_SHIFT	NODES_SHIFT
-#define LAST__NID_MASK	((1 << LAST__NID_SHIFT)-1)
+#define LAST__CPU_SHIFT	NR_CPUS_BITS
+#define LAST__CPU_MASK	((1 << LAST__CPU_SHIFT)-1)
 
-#define LAST_NIDPID_SHIFT (LAST__PID_SHIFT+LAST__NID_SHIFT)
+#define LAST_CPUPID_SHIFT (LAST__PID_SHIFT+LAST__CPU_SHIFT)
 #else
-#define LAST_NIDPID_SHIFT 0
+#define LAST_CPUPID_SHIFT 0
 #endif
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_NIDPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
-#define LAST_NIDPID_WIDTH LAST_NIDPID_SHIFT
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
 #else
-#define LAST_NIDPID_WIDTH 0
+#define LAST_CPUPID_WIDTH 0
 #endif
 
 /*
@@ -87,8 +87,8 @@
 #define NODE_NOT_IN_PAGE_FLAGS
 #endif
 
-#if defined(CONFIG_NUMA_BALANCING) && LAST_NIDPID_WIDTH == 0
-#define LAST_NIDPID_NOT_IN_PAGE_FLAGS
+#if defined(CONFIG_NUMA_BALANCING) && LAST_CPUPID_WIDTH == 0
+#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
 #endif
 
 #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
kernel/bounds.c

@@ -10,6 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/kbuild.h>
 #include <linux/page_cgroup.h>
+#include <linux/log2.h>
 
 void foo(void)
 {
@@ -17,5 +18,8 @@ void foo(void)
 	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
 	DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
 	DEFINE(NR_PCG_FLAGS, __NR_PCG_FLAGS);
+#ifdef CONFIG_SMP
+	DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
+#endif
 	/* End of constants */
 }
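The NR_CPUS_BITS constant generated above is the width of the CPU field in
the cpupid encoding. A rough standalone model of the computation, with
ilog2() reimplemented and CONFIG_NR_CPUS=64 assumed as a power-of-two
configuration for illustration:

#include <stdio.h>

#define CONFIG_NR_CPUS 64	/* assumed value for the sketch */

static int ilog2(unsigned long n)
{
	int bits = -1;

	while (n) {		/* index of the highest set bit */
		bits++;
		n >>= 1;
	}
	return bits;
}

int main(void)
{
	/* 64 CPUs -> ids 0..63 -> a 6-bit CPU field in the cpupid */
	printf("NR_CPUS_BITS = %d\n", ilog2(CONFIG_NR_CPUS));
	return 0;
}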
kernel/sched/fair.c

@@ -1210,7 +1210,7 @@ static void task_numa_placement(struct task_struct *p)
 /*
  * Got a PROT_NONE fault for a page on @node.
  */
-void task_numa_fault(int last_nidpid, int node, int pages, bool migrated)
+void task_numa_fault(int last_cpupid, int node, int pages, bool migrated)
 {
 	struct task_struct *p = current;
 	int priv;
@@ -1226,8 +1226,8 @@ void task_numa_fault(int last_nidpid, int node, int pages, bool migrated)
 	 * First accesses are treated as private, otherwise consider accesses
 	 * to be private if the accessing pid has not changed
 	 */
-	if (!nidpid_pid_unset(last_nidpid))
-		priv = ((p->pid & LAST__PID_MASK) == nidpid_to_pid(last_nidpid));
+	if (!cpupid_pid_unset(last_cpupid))
+		priv = ((p->pid & LAST__PID_MASK) == cpupid_to_pid(last_cpupid));
 	else
 		priv = 1;
 
mm/huge_memory.c

@@ -1282,7 +1282,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
 	int page_nid = -1, this_nid = numa_node_id();
-	int target_nid, last_nidpid = -1;
+	int target_nid, last_cpupid = -1;
 	bool page_locked;
 	bool migrated = false;
 
@@ -1293,7 +1293,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page = pmd_page(pmd);
 	BUG_ON(is_huge_zero_page(page));
 	page_nid = page_to_nid(page);
-	last_nidpid = page_nidpid_last(page);
+	last_cpupid = page_cpupid_last(page);
 	count_vm_numa_event(NUMA_HINT_FAULTS);
 	if (page_nid == this_nid)
 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
@@ -1362,7 +1362,7 @@ out:
 	page_unlock_anon_vma_read(anon_vma);
 
 	if (page_nid != -1)
-		task_numa_fault(last_nidpid, page_nid, HPAGE_PMD_NR, migrated);
+		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, migrated);
 
 	return 0;
 }
@@ -1682,7 +1682,7 @@ static void __split_huge_page_refcount(struct page *page,
 	page_tail->mapping = page->mapping;
 
 	page_tail->index = page->index + i;
-	page_nidpid_xchg_last(page_tail, page_nidpid_last(page));
+	page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
 
 	BUG_ON(!PageAnon(page_tail));
 	BUG_ON(!PageUptodate(page_tail));
mm/memory.c

@@ -69,8 +69,8 @@
 
 #include "internal.h"
 
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
-#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_nidpid.
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+#warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
 #endif
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -3536,7 +3536,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page = NULL;
 	spinlock_t *ptl;
 	int page_nid = -1;
-	int last_nidpid;
+	int last_cpupid;
 	int target_nid;
 	bool migrated = false;
 
@@ -3567,7 +3567,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 	BUG_ON(is_zero_pfn(page_to_pfn(page)));
 
-	last_nidpid = page_nidpid_last(page);
+	last_cpupid = page_cpupid_last(page);
 	page_nid = page_to_nid(page);
 	target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 	pte_unmap_unlock(ptep, ptl);
@@ -3583,7 +3583,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 out:
 	if (page_nid != -1)
-		task_numa_fault(last_nidpid, page_nid, 1, migrated);
+		task_numa_fault(last_cpupid, page_nid, 1, migrated);
 	return 0;
 }
 
@@ -3598,7 +3598,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long offset;
 	spinlock_t *ptl;
 	bool numa = false;
-	int last_nidpid;
+	int last_cpupid;
 
 	spin_lock(&mm->page_table_lock);
 	pmd = *pmdp;
@@ -3643,7 +3643,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (unlikely(!page))
 			continue;
 
-		last_nidpid = page_nidpid_last(page);
+		last_cpupid = page_cpupid_last(page);
 		page_nid = page_to_nid(page);
 		target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 		pte_unmap_unlock(pte, ptl);
@@ -3656,7 +3656,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		}
 
 		if (page_nid != -1)
-			task_numa_fault(last_nidpid, page_nid, 1, migrated);
+			task_numa_fault(last_cpupid, page_nid, 1, migrated);
 
 		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 	}
mm/mempolicy.c

@@ -2324,6 +2324,8 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 	struct zone *zone;
 	int curnid = page_to_nid(page);
 	unsigned long pgoff;
+	int thiscpu = raw_smp_processor_id();
+	int thisnid = cpu_to_node(thiscpu);
 	int polnid = -1;
 	int ret = -1;
 
@@ -2372,11 +2374,11 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 
 	/* Migrate the page towards the node whose CPU is referencing it */
 	if (pol->flags & MPOL_F_MORON) {
-		int last_nidpid;
-		int this_nidpid;
+		int last_cpupid;
+		int this_cpupid;
 
-		polnid = numa_node_id();
-		this_nidpid = nid_pid_to_nidpid(polnid, current->pid);
+		polnid = thisnid;
+		this_cpupid = cpu_pid_to_cpupid(thiscpu, current->pid);
 
 		/*
 		 * Multi-stage node selection is used in conjunction
@@ -2399,8 +2401,8 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		 * it less likely we act on an unlikely task<->page
 		 * relation.
 		 */
-		last_nidpid = page_nidpid_xchg_last(page, this_nidpid);
-		if (!nidpid_pid_unset(last_nidpid) && nidpid_to_nid(last_nidpid) != polnid)
+		last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
+		if (!cpupid_pid_unset(last_cpupid) && cpupid_to_nid(last_cpupid) != thisnid)
 			goto out;
 
 #ifdef CONFIG_NUMA_BALANCING
@@ -2410,7 +2412,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long
 		 * This way a short and temporary process migration will
 		 * not cause excessive memory migration.
 		 */
-		if (polnid != current->numa_preferred_nid &&
+		if (thisnid != current->numa_preferred_nid &&
 		    !current->numa_migrate_seq)
 			goto out;
 #endif
mm/migrate.c

@@ -1498,7 +1498,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
 					  __GFP_NOWARN) &
 					 ~GFP_IOFS, 0);
 	if (newpage)
-		page_nidpid_xchg_last(newpage, page_nidpid_last(page));
+		page_cpupid_xchg_last(newpage, page_cpupid_last(page));
 
 	return newpage;
 }
@@ -1675,7 +1675,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	if (!new_page)
 		goto out_fail;
 
-	page_nidpid_xchg_last(new_page, page_nidpid_last(page));
+	page_cpupid_xchg_last(new_page, page_cpupid_last(page));
 
 	isolated = numamigrate_isolate_page(pgdat, page);
 	if (!isolated) {
mm/mm_init.c

@@ -71,26 +71,26 @@ void __init mminit_verify_pageflags_layout(void)
 	unsigned long or_mask, add_mask;
 
 	shift = 8 * sizeof(unsigned long);
-	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_NIDPID_SHIFT;
+	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
-		"Section %d Node %d Zone %d Lastnidpid %d Flags %d\n",
+		"Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
 		SECTIONS_WIDTH,
 		NODES_WIDTH,
 		ZONES_WIDTH,
-		LAST_NIDPID_WIDTH,
+		LAST_CPUPID_WIDTH,
 		NR_PAGEFLAGS);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
-		"Section %d Node %d Zone %d Lastnidpid %d\n",
+		"Section %d Node %d Zone %d Lastcpupid %d\n",
 		SECTIONS_SHIFT,
 		NODES_SHIFT,
 		ZONES_SHIFT,
-		LAST_NIDPID_SHIFT);
+		LAST_CPUPID_SHIFT);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
-		"Section %lu Node %lu Zone %lu Lastnidpid %lu\n",
+		"Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
 		(unsigned long)SECTIONS_PGSHIFT,
 		(unsigned long)NODES_PGSHIFT,
 		(unsigned long)ZONES_PGSHIFT,
-		(unsigned long)LAST_NIDPID_PGSHIFT);
+		(unsigned long)LAST_CPUPID_PGSHIFT);
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
 		"Node/Zone ID: %lu -> %lu\n",
 		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
@@ -102,9 +102,9 @@ void __init mminit_verify_pageflags_layout(void)
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
 		"Node not in page flags");
 #endif
-#ifdef LAST_NIDPID_NOT_IN_PAGE_FLAGS
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
-		"Last nidpid not in page flags");
+		"Last cpupid not in page flags");
 #endif
 
 	if (SECTIONS_WIDTH) {
mm/mmzone.c

@@ -97,20 +97,20 @@ void lruvec_init(struct lruvec *lruvec)
 		INIT_LIST_HEAD(&lruvec->lists[lru]);
 }
 
-#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NIDPID_NOT_IN_PAGE_FLAGS)
-int page_nidpid_xchg_last(struct page *page, int nidpid)
+#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS)
+int page_cpupid_xchg_last(struct page *page, int cpupid)
 {
 	unsigned long old_flags, flags;
-	int last_nidpid;
+	int last_cpupid;
 
 	do {
 		old_flags = flags = page->flags;
-		last_nidpid = page_nidpid_last(page);
+		last_cpupid = page_cpupid_last(page);
 
-		flags &= ~(LAST_NIDPID_MASK << LAST_NIDPID_PGSHIFT);
-		flags |= (nidpid & LAST_NIDPID_MASK) << LAST_NIDPID_PGSHIFT;
+		flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
+		flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
 	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
 
-	return last_nidpid;
+	return last_cpupid;
 }
 #endif
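page_cpupid_xchg_last() above rewrites one bitfield inside page->flags
without taking a lock: it recomputes the word and retries the cmpxchg until
no concurrent update has intervened. A minimal userspace sketch of the same
pattern, with C11 atomics standing in for the kernel's cmpxchg() and a
made-up field layout:

#include <stdatomic.h>
#include <stdio.h>

#define FIELD_SHIFT 4
#define FIELD_MASK  0xffUL

static _Atomic unsigned long flags = 0xdead0000UL;

static unsigned long field_xchg(unsigned long new_val)
{
	unsigned long old, desired;

	do {
		old = atomic_load(&flags);
		desired = old & ~(FIELD_MASK << FIELD_SHIFT);
		desired |= (new_val & FIELD_MASK) << FIELD_SHIFT;
		/* retry if another thread changed flags in between */
	} while (!atomic_compare_exchange_weak(&flags, &old, desired));

	return (old >> FIELD_SHIFT) & FIELD_MASK;
}

int main(void)
{
	unsigned long prev = field_xchg(0x5a);

	printf("prev field=%#lx, flags now=%#lx\n", prev, atomic_load(&flags));
	return 0;
}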
mm/mprotect.c

@@ -37,14 +37,14 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
-		int dirty_accountable, int prot_numa, bool *ret_all_same_nidpid)
+		int dirty_accountable, int prot_numa, bool *ret_all_same_cpupid)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
 	unsigned long pages = 0;
-	bool all_same_nidpid = true;
-	int last_nid = -1;
+	bool all_same_cpupid = true;
+	int last_cpu = -1;
 	int last_pid = -1;
 
 	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
@@ -64,17 +64,17 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 
 			page = vm_normal_page(vma, addr, oldpte);
 			if (page) {
-				int nidpid = page_nidpid_last(page);
-				int this_nid = nidpid_to_nid(nidpid);
-				int this_pid = nidpid_to_pid(nidpid);
+				int cpupid = page_cpupid_last(page);
+				int this_cpu = cpupid_to_cpu(cpupid);
+				int this_pid = cpupid_to_pid(cpupid);
 
-				if (last_nid == -1)
-					last_nid = this_nid;
+				if (last_cpu == -1)
+					last_cpu = this_cpu;
 				if (last_pid == -1)
 					last_pid = this_pid;
-				if (last_nid != this_nid ||
+				if (last_cpu != this_cpu ||
 				    last_pid != this_pid) {
-					all_same_nidpid = false;
+					all_same_cpupid = false;
 				}
 
 				if (!pte_numa(oldpte)) {
@@ -115,7 +115,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
 
-	*ret_all_same_nidpid = all_same_nidpid;
+	*ret_all_same_cpupid = all_same_cpupid;
 	return pages;
 }
 
@@ -142,7 +142,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 	pmd_t *pmd;
 	unsigned long next;
 	unsigned long pages = 0;
-	bool all_same_nidpid;
+	bool all_same_cpupid;
 
 	pmd = pmd_offset(pud, addr);
 	do {
@@ -168,7 +168,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
 		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
-				 dirty_accountable, prot_numa, &all_same_nidpid);
+				 dirty_accountable, prot_numa, &all_same_cpupid);
 		pages += this_pages;
 
 		/*
@@ -177,7 +177,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		 * node. This allows a regular PMD to be handled as one fault
 		 * and effectively batches the taking of the PTL
 		 */
-		if (prot_numa && this_pages && all_same_nidpid)
+		if (prot_numa && this_pages && all_same_cpupid)
 			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 
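The all_same_cpupid tracking above asks whether every page in the range was
last touched by the same (cpu, pid) pair, so that a whole PMD can be handled
as one NUMA hinting fault. A standalone sketch of that predicate; struct tag
and the sample values are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct tag { int cpu; int pid; };

static bool all_same_cpupid(const struct tag *tags, int n)
{
	int last_cpu = -1, last_pid = -1;

	for (int i = 0; i < n; i++) {
		if (last_cpu == -1)
			last_cpu = tags[i].cpu;
		if (last_pid == -1)
			last_pid = tags[i].pid;
		if (last_cpu != tags[i].cpu || last_pid != tags[i].pid)
			return false;
	}
	return true;
}

int main(void)
{
	struct tag same[]  = { {2, 140}, {2, 140}, {2, 140} };
	struct tag mixed[] = { {2, 140}, {5, 140} };

	printf("same:  %d\n", all_same_cpupid(same, 3));	/* 1 */
	printf("mixed: %d\n", all_same_cpupid(mixed, 2));	/* 0 */
	return 0;
}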
mm/page_alloc.c

@@ -626,7 +626,7 @@ static inline int free_pages_check(struct page *page)
 		bad_page(page);
 		return 1;
 	}
-	page_nidpid_reset_last(page);
+	page_cpupid_reset_last(page);
 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return 0;
@@ -4015,7 +4015,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		mminit_verify_page_links(page, zone, nid, pfn);
 		init_page_count(page);
 		page_mapcount_reset(page);
-		page_nidpid_reset_last(page);
+		page_cpupid_reset_last(page);
 		SetPageReserved(page);
 		/*
 		 * Mark the block movable so that blocks are reserved for