Merge branch 'akpm' (patches from Andrew)
Merge fixes from Andrew Morton:
 "6 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: proc: smaps_rollup: fix pss_locked calculation
  Rename include/{uapi => }/asm-generic/shmparam.h really
  Revert "mm: use early_pfn_to_nid in page_ext_init"
  mm/gup: fix gup_pmd_range() for dax
  Revert "mm: slowly shrink slabs with a relatively small number of objects"
  Revert "mm: don't reclaim inodes with many attached pages"
Commit 1f947a7a01
fs/inode.c
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
 		return LRU_REMOVED;
 	}
 
-	/*
-	 * Recently referenced inodes and inodes with many attached pages
-	 * get one more pass.
-	 */
-	if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
+	/* recently referenced inodes get one more pass */
+	if (inode->i_state & I_REFERENCED) {
 		inode->i_state &= ~I_REFERENCED;
 		spin_unlock(&inode->i_lock);
 		return LRU_ROTATE;
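The revert restores the older behavior: only recently referenced inodes are rotated back onto the LRU, and an inode is no longer kept around merely because it still has page cache attached. As a rough illustration, here is a minimal userspace model of the before/after predicate — the struct, flag, and function names are simplified stand-ins, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	#define I_REFERENCED 0x1	/* simplified stand-in for the kernel flag */

	struct fake_inode {
		unsigned int i_state;
		unsigned long nrpages;	/* pages attached to inode->i_data */
	};

	/* Before the revert: inodes with more than one attached page also got
	 * another pass around the LRU instead of being reclaimed. */
	static bool rotate_before_revert(const struct fake_inode *inode)
	{
		return (inode->i_state & I_REFERENCED) || inode->nrpages > 1;
	}

	/* After the revert: only recently referenced inodes get one more pass. */
	static bool rotate_after_revert(const struct fake_inode *inode)
	{
		return inode->i_state & I_REFERENCED;
	}

	int main(void)
	{
		/* An unreferenced inode that still holds a lot of page cache. */
		struct fake_inode cold = { .i_state = 0, .nrpages = 1000 };

		printf("rotate before revert: %d\n", rotate_before_revert(&cold)); /* 1 */
		printf("rotate after revert:  %d\n", rotate_after_revert(&cold));  /* 0 */
		return 0;
	}

The model shows the behavioral difference for an unreferenced inode that still holds page cache: before the revert it was rotated indefinitely, after it proceeds toward reclaim.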
fs/proc/task_mmu.c
@@ -423,7 +423,7 @@ struct mem_size_stats {
 };
 
 static void smaps_account(struct mem_size_stats *mss, struct page *page,
-		bool compound, bool young, bool dirty)
+		bool compound, bool young, bool dirty, bool locked)
 {
 	int i, nr = compound ? 1 << compound_order(page) : 1;
 	unsigned long size = nr * PAGE_SIZE;
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 		else
 			mss->private_clean += size;
 		mss->pss += (u64)size << PSS_SHIFT;
+		if (locked)
+			mss->pss_locked += (u64)size << PSS_SHIFT;
 		return;
 	}
 
 	for (i = 0; i < nr; i++, page++) {
 		int mapcount = page_mapcount(page);
+		unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
 
 		if (mapcount >= 2) {
 			if (dirty || PageDirty(page))
 				mss->shared_dirty += PAGE_SIZE;
 			else
 				mss->shared_clean += PAGE_SIZE;
-			mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+			mss->pss += pss / mapcount;
+			if (locked)
+				mss->pss_locked += pss / mapcount;
 		} else {
 			if (dirty || PageDirty(page))
 				mss->private_dirty += PAGE_SIZE;
 			else
 				mss->private_clean += PAGE_SIZE;
-			mss->pss += PAGE_SIZE << PSS_SHIFT;
+			mss->pss += pss;
+			if (locked)
+				mss->pss_locked += pss;
 		}
 	}
 }
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page = NULL;
 
 	if (pte_present(*pte)) {
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
 	if (!page)
 		return;
 
-	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+	smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 {
 	struct mem_size_stats *mss = walk->private;
 	struct vm_area_struct *vma = walk->vma;
+	bool locked = !!(vma->vm_flags & VM_LOCKED);
 	struct page *page;
 
 	/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 		/* pass */;
 	else
 		VM_BUG_ON_PAGE(1, page);
-	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
 }
 #else
 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
 		}
 	}
 #endif
-
 	/* mmap_sem is held in m_start */
 	walk_page_vma(vma, &smaps_walk);
-	if (vma->vm_flags & VM_LOCKED)
-		mss->pss_locked += mss->pss;
 }
 
 #define SEQ_PUT_DEC(str, val) \
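The fix works because smaps_account() keeps PSS in fixed point: each page contributes PAGE_SIZE << PSS_SHIFT divided by its mapcount, and the fractional bits are shifted back out when the value is reported. pss_locked now accumulates per page with the same proportional math, instead of the old smap_gather_stats() shortcut of adding the whole mss->pss for a VM_LOCKED vma — which over-counted in the smaps_rollup case, where mss already holds totals from earlier vmas. A standalone sketch of the arithmetic; the page count and mapcounts are made up for illustration:

	#include <stdio.h>

	#define PAGE_SIZE 4096ULL
	#define PSS_SHIFT 12	/* fixed-point fraction bits, as in task_mmu.c */

	int main(void)
	{
		/* Hypothetical pages of one VM_LOCKED vma and their mapcounts. */
		int mapcount[] = { 1, 2, 4 };
		unsigned long long pss = 0, pss_locked = 0;
		int locked = 1;

		for (int i = 0; i < 3; i++) {
			unsigned long long p = PAGE_SIZE << PSS_SHIFT;

			pss += p / mapcount[i];
			if (locked)		/* per page, mirroring the fix */
				pss_locked += p / mapcount[i];
		}

		/* Shift the fraction back out for reporting. */
		printf("pss = %llu bytes, pss_locked = %llu bytes\n",
		       pss >> PSS_SHIFT, pss_locked >> PSS_SHIFT);
		return 0;
	}

For the three pages above this prints pss = 7168 bytes, and pss_locked matches it exactly because the whole (model) vma is locked.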
init/main.c
@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void)
 		initrd_start = 0;
 	}
 #endif
-	page_ext_init();
 	kmemleak_init();
 	setup_per_cpu_pageset();
 	numa_policy_init();
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void)
 	sched_init_smp();
 
 	page_alloc_init_late();
+	/* Initialize page ext after all struct pages are initialized. */
+	page_ext_init();
 
 	do_basic_setup();
 
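Both hunks belong to the revert of "mm: use early_pfn_to_nid in page_ext_init": page_ext_init() moves back to kernel_init_freeable(), after page_alloc_init_late(), because with CONFIG_DEFERRED_STRUCT_PAGE_INIT most struct pages are only initialized at that late point (the matching mm/page_ext.c hunk appears further down). A toy model of the ordering constraint — the function names are illustrative stand-ins, not kernel APIs:

	#include <assert.h>
	#include <stdbool.h>

	static bool deferred_pages_ready;

	/* Models page_alloc_init_late(): deferred struct pages become usable. */
	static void model_page_alloc_init_late(void)
	{
		deferred_pages_ready = true;
	}

	/* Models page_ext_init(): it walks every pfn's struct page, so all of
	 * them must already be initialized. */
	static void model_page_ext_init(void)
	{
		assert(deferred_pages_ready);
	}

	int main(void)
	{
		model_page_alloc_init_late();	/* must run first, hence the move */
		model_page_ext_init();
		return 0;
	}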
mm/gup.c (3 lines changed)
@@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 		if (!pmd_present(pmd))
 			return 0;
 
-		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
+		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
+			     pmd_devmap(pmd))) {
 			/*
 			 * NUMA hinting faults need to be handled in the GUP
 			 * slowpath for accounting purposes and so that they
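gup_pmd_range() either hands a pmd to the huge-page path or calls gup_pte_range(), which dereferences the entry as a page table of ptes. A dax huge mapping sets pmd_devmap() rather than pmd_trans_huge(), so before this fix the fast path could walk a device pmd as though it pointed at a pte table. A toy model of the corrected dispatch — the flag values and handler names are illustrative, not the kernel's:

	#include <stdio.h>

	#define PMD_TRANS_HUGE	0x1	/* thp mapping */
	#define PMD_HUGE	0x2	/* hugetlb mapping */
	#define PMD_DEVMAP	0x4	/* dax/device mapping (the missing case) */

	/* Any "large" pmd maps memory directly and must be handled as one unit;
	 * only a normal pmd may be dereferenced as a table of ptes. */
	static const char *classify_pmd(unsigned int flags)
	{
		if (flags & (PMD_TRANS_HUGE | PMD_HUGE | PMD_DEVMAP))
			return "huge-pmd handler";
		return "gup_pte_range (walked as a pte table)";
	}

	int main(void)
	{
		printf("thp pmd   -> %s\n", classify_pmd(PMD_TRANS_HUGE));
		printf("dax pmd   -> %s\n", classify_pmd(PMD_DEVMAP)); /* fixed case */
		printf("plain pmd -> %s\n", classify_pmd(0));
		return 0;
	}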
mm/page_ext.c
@@ -398,10 +398,8 @@ void __init page_ext_init(void)
 		 * We know some arch can have a nodes layout such as
 		 * -------------pfn-------------->
 		 * N0 | N1 | N2 | N0 | N1 | N2|....
-		 *
-		 * Take into account DEFERRED_STRUCT_PAGE_INIT.
 		 */
-		if (early_pfn_to_nid(pfn) != nid)
+		if (pfn_to_nid(pfn) != nid)
 			continue;
 		if (init_section_page_ext(pfn, nid))
 			goto oom;
mm/vmscan.c (10 lines changed)
@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 		delta = freeable / 2;
 	}
 
-	/*
-	 * Make sure we apply some minimal pressure on default priority
-	 * even on small cgroups. Stale objects are not only consuming memory
-	 * by themselves, but can also hold a reference to a dying cgroup,
-	 * preventing it from being reclaimed. A dying cgroup with all
-	 * corresponding structures like per-cpu stats and kmem caches
-	 * can be really big, so it may lead to a significant waste of memory.
-	 */
-	delta = max_t(unsigned long long, delta, min(freeable, batch_size));
-
 	total_scan += delta;
 	if (total_scan < 0) {
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
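For scale: do_shrink_slab() derives the scan delta from roughly 4 * (freeable >> priority) / seeks, and the reverted clamp forced it up to at least min(freeable, batch_size), so even a tiny cgroup slab LRU was scanned in full. A standalone arithmetic sketch with plausible constants (DEF_PRIORITY = 12, seeks = 2, and a batch of 128 match common kernel defaults; the freeable count is made up):

	#include <stdio.h>

	#define DEF_PRIORITY	12	/* default reclaim priority */
	#define SEEKS		2	/* DEFAULT_SEEKS */
	#define BATCH_SIZE	128ULL	/* SHRINK_BATCH */

	int main(void)
	{
		unsigned long long freeable = 100;	/* a small cgroup's slab LRU */

		/* Base formula: delta = 4 * (freeable >> priority) / seeks. */
		unsigned long long delta = (freeable >> DEF_PRIORITY) * 4 / SEEKS;
		printf("without the clamp: delta = %llu\n", delta);	/* 0 */

		/* The reverted clamp forced at least min(freeable, batch_size). */
		unsigned long long floor = freeable < BATCH_SIZE ? freeable : BATCH_SIZE;
		if (delta < floor)
			delta = floor;
		printf("with the clamp:    delta = %llu\n", delta);	/* 100 */
		return 0;
	}

With the clamp removed, small LRUs see no pressure at low priority (delta = 0); with it, the entire 100-object list was eligible on every pass.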