mm: sched: Adapt the scanning rate if a NUMA hinting fault does not migrate
The PTE scanning rate and fault rates are two of the biggest sources of system CPU overhead with automatic NUMA placement. Ideally a proper policy would detect if a workload was properly placed, schedule and adjust the PTE scanning rate accordingly. We do not track the necessary information to do that but we at least know if we migrated or not.

This patch scans slower if a page was not migrated as the result of a NUMA hinting fault, up to sysctl_numa_balancing_scan_period_max, which is now higher than the previous default. Once every minute it will reset the scanner in case of phase changes.

This is hilariously crude and the numbers are arbitrary. Workloads will converge quite slowly in comparison to what a proper policy should be able to do. On the plus side, we will chew up less CPU for workloads that have no need for automatic balancing.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Parent: e42c8ff299
Commit: b8593bfda1
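To make the new feedback loop concrete before reading the diff, here is a hedged userspace model of the policy (plain C, not kernel code; the loop, constants, and fault trace are illustrative only, and the 10 ms step assumes HZ=1000):

#include <stdio.h>
#include <stdbool.h>

/* Defaults from this patch, in milliseconds. */
static const unsigned int scan_period_min = 100;      /* fastest scan rate */
static const unsigned int scan_period_max = 100 * 50; /* slowest scan rate */

static unsigned int scan_period = 100; /* current per-task scan period */

/* New behaviour: back off only when a hinting fault did NOT migrate the
 * page, i.e. when the page was already properly placed. */
static void hinting_fault(bool migrated)
{
	if (!migrated) {
		scan_period += 10; /* jiffies_to_msecs(10) at HZ=1000 */
		if (scan_period > scan_period_max)
			scan_period = scan_period_max;
	}
}

/* Once-a-minute reset guarding against workload phase changes. */
static void periodic_reset(void)
{
	scan_period = scan_period_min;
}

int main(void)
{
	/* A well-placed workload: no fault migrates, so scanning slows. */
	for (int i = 0; i < 600; i++)
		hinting_fault(false);
	printf("after 600 local faults: %u ms\n", scan_period); /* 5000 */

	periodic_reset();
	printf("after reset: %u ms\n", scan_period); /* 100 */
	return 0;
}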
include/linux/mm_types.h
@@ -410,6 +410,9 @@ struct mm_struct {
 	 */
 	unsigned long numa_next_scan;
 
+	/* numa_next_reset is when the PTE scanner period will be reset */
+	unsigned long numa_next_reset;
+
 	/* Restart point for scanning and setting pte_numa */
 	unsigned long numa_scan_offset;
 
include/linux/sched.h
@@ -1562,9 +1562,9 @@ struct task_struct {
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
 #ifdef CONFIG_NUMA_BALANCING
-extern void task_numa_fault(int node, int pages);
+extern void task_numa_fault(int node, int pages, bool migrated);
 #else
-static inline void task_numa_fault(int node, int pages)
+static inline void task_numa_fault(int node, int pages, bool migrated)
 {
 }
 #endif
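Note that the !CONFIG_NUMA_BALANCING stub gains the same third parameter, so call sites such as do_numa_page() below compile unchanged whether or not NUMA balancing is configured in.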
@@ -2009,6 +2009,7 @@ extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
 extern unsigned int sysctl_numa_balancing_scan_delay;
 extern unsigned int sysctl_numa_balancing_scan_period_min;
 extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_period_reset;
 extern unsigned int sysctl_numa_balancing_scan_size;
 extern unsigned int sysctl_numa_balancing_settle_count;
 
kernel/sched/core.c
@@ -1537,6 +1537,7 @@ static void __sched_fork(struct task_struct *p)
 #ifdef CONFIG_NUMA_BALANCING
 	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
 		p->mm->numa_next_scan = jiffies;
+		p->mm->numa_next_reset = jiffies;
 		p->mm->numa_scan_seq = 0;
 	}
 
kernel/sched/fair.c
@@ -784,7 +784,8 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  * numa task sample period in ms
  */
 unsigned int sysctl_numa_balancing_scan_period_min = 100;
-unsigned int sysctl_numa_balancing_scan_period_max = 100*16;
+unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
+unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
 
 /* Portion of address space to scan in MB */
 unsigned int sysctl_numa_balancing_scan_size = 256;
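For reference, the new defaults expressed in plain units (an illustrative compile-time check, not part of the patch):

/* Illustrative only: the patch's defaults in plain units. The scanner can
 * now back off to one scan every 5 seconds (up from 1.6 seconds), and the
 * period is reset once per minute, matching "once every minute" in the
 * changelog. */
_Static_assert(100 * 50 == 5000, "new scan_period_max is 5000 ms");
_Static_assert(100 * 16 == 1600, "old scan_period_max was 1600 ms");
_Static_assert(100 * 600 == 60 * 1000, "scan_period_reset is one minute");

int main(void) { return 0; }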
@@ -806,20 +807,19 @@ static void task_numa_placement(struct task_struct *p)
 /*
  * Got a PROT_NONE fault for a page on @node.
  */
-void task_numa_fault(int node, int pages)
+void task_numa_fault(int node, int pages, bool migrated)
 {
 	struct task_struct *p = current;
 
 	/* FIXME: Allocate task-specific structure for placement policy here */
 
 	/*
-	 * Assume that as faults occur that pages are getting properly placed
-	 * and fewer NUMA hints are required. Note that this is a big
-	 * assumption, it assumes processes reach a steady steady with no
-	 * further phase changes.
+	 * If pages are properly placed (did not migrate) then scan slower.
+	 * This is reset periodically in case of phase changes
 	 */
-	p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
-				  p->numa_scan_period + jiffies_to_msecs(2));
+	if (!migrated)
+		p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
+			p->numa_scan_period + jiffies_to_msecs(10));
 
 	task_numa_placement(p);
 }
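Note the back-off step also grows from jiffies_to_msecs(2) to jiffies_to_msecs(10). Assuming HZ=1000, that is 10 ms per unmigrated fault, so a well-placed task climbs from the 100 ms floor to the new 5000 ms ceiling after 490 consecutive unmigrated faults, while tasks whose faults still migrate now keep scanning at full rate instead of backing off unconditionally as before.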
@@ -857,6 +857,19 @@ void task_numa_work(struct callback_head *work)
 	if (p->flags & PF_EXITING)
 		return;
 
+	/*
+	 * Reset the scan period if enough time has gone by. Objective is that
+	 * scanning will be reduced if pages are properly placed. As tasks
+	 * can enter different phases this needs to be re-examined. Lacking
+	 * proper tracking of reference behaviour, this blunt hammer is used.
+	 */
+	migrate = mm->numa_next_reset;
+	if (time_after(now, migrate)) {
+		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
+		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
+		xchg(&mm->numa_next_reset, next_scan);
+	}
+
 	/*
 	 * Enforce maximal scan/migration frequency..
 	 */
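Why a plain xchg() rather than a compare-and-swap? A hedged userspace model of the reset window using C11 atomics (maybe_reset() and its flat argument list are invented for illustration): if two threads of one process race past the time_after() check, both write essentially the same deadline and the period is merely reset twice, which is harmless.

#include <stdatomic.h>
#include <stdio.h>

/* Shared per-mm deadline, standing in for mm->numa_next_reset. */
static _Atomic unsigned long numa_next_reset;

/* Sketch of the reset path in task_numa_work(): an unconditional atomic
 * exchange is enough because a racing double-reset only re-applies the
 * minimum period; it cannot corrupt the deadline. */
static void maybe_reset(unsigned long now, unsigned long reset_interval,
			unsigned int *scan_period, unsigned int period_min)
{
	if (now > atomic_load(&numa_next_reset)) { /* models time_after() */
		*scan_period = period_min;
		atomic_exchange(&numa_next_reset, now + reset_interval);
	}
}

int main(void)
{
	unsigned int period = 5000;

	maybe_reset(1000, 60000, &period, 100);
	printf("period=%u next_reset=%lu\n", period,
	       atomic_load(&numa_next_reset)); /* period=100 next_reset=61000 */
	return 0;
}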
kernel/sysctl.c
@@ -366,6 +366,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "numa_balancing_scan_period_reset",
+		.data		= &sysctl_numa_balancing_scan_period_reset,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 	{
 		.procname	= "numa_balancing_scan_period_max_ms",
 		.data		= &sysctl_numa_balancing_scan_period_max,
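The entry above surfaces the tunable as /proc/sys/kernel/numa_balancing_scan_period_reset. A minimal sketch of inspecting and raising it from userspace (assumes a kernel with this patch; writing requires root):

#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/kernel/numa_balancing_scan_period_reset";
	unsigned int ms;
	FILE *f;

	/* Read the current reset interval in milliseconds. */
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &ms) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("reset interval: %u ms\n", ms);

	/* Double it, e.g. for workloads with slow phase changes. */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%u\n", ms * 2);
	fclose(f);
	return 0;
}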
mm/huge_memory.c
@@ -1068,7 +1068,7 @@ out_unlock:
 	spin_unlock(&mm->page_table_lock);
 	if (page) {
 		put_page(page);
-		task_numa_fault(numa_node_id(), HPAGE_PMD_NR);
+		task_numa_fault(numa_node_id(), HPAGE_PMD_NR, false);
 	}
 	return 0;
 }
mm/memory.c
@@ -3468,6 +3468,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	spinlock_t *ptl;
 	int current_nid = -1;
 	int target_nid;
+	bool migrated = false;
 
 	/*
 	 * The "pte" at this point cannot be used safely without
@@ -3509,12 +3510,13 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/* Migrate to the requested node */
-	if (migrate_misplaced_page(page, target_nid))
+	migrated = migrate_misplaced_page(page, target_nid);
+	if (migrated)
 		current_nid = target_nid;
 
 out:
 	if (current_nid != -1)
-		task_numa_fault(current_nid, 1);
+		task_numa_fault(current_nid, 1, migrated);
 	return 0;
 }
 
@@ -3554,6 +3556,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	int curr_nid = local_nid;
 	int target_nid;
+	bool migrated;
 	if (!pte_present(pteval))
 		continue;
 	if (!pte_numa(pteval))
@@ -3590,9 +3593,10 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 		/* Migrate to the requested node */
 		pte_unmap_unlock(pte, ptl);
-		if (migrate_misplaced_page(page, target_nid))
+		migrated = migrate_misplaced_page(page, target_nid);
+		if (migrated)
 			curr_nid = target_nid;
-		task_numa_fault(curr_nid, 1);
+		task_numa_fault(curr_nid, 1, migrated);
 
 		pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 	}