mm: fold mlocked_vma_newpage() into its only call site
In a previous commit ("mm: use the light version __mod_zone_page_state in mlocked_vma_newpage()") an irq-unsafe __mod_zone_page_state is used.  As suggested by Andrew, to reduce the risk that new call sites use mlocked_vma_newpage() incorrectly without realizing they are adding a race, this patch folds mlocked_vma_newpage() into its only call site, page_add_new_anon_rmap(), so the logic is open-coded and readers can see exactly what is going on.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Suggested-by: Andrew Morton <akpm@linux-foundation.org>
Suggested-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent: bea04b0732
Commit: 7ee07a44eb
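For readers unfamiliar with the two counter-update flavours the log refers to, the sketch below illustrates the distinction. It is not part of the patch, and the wrapper is only a rough approximation of what an irq-safe update does; the point is that the plain __mod_zone_page_state() may be used wherever interrupts cannot touch the counter, which is the case here because NR_MLOCK is never updated from interrupt context and the pte spinlock keeps preemption disabled.

#include <linux/vmstat.h>
#include <linux/irqflags.h>

/*
 * Rough sketch only, not the real mm/vmstat.c implementation: an irq-safe
 * update brackets the non-atomic per-zone counter change with
 * local_irq_save()/local_irq_restore(), which is the cost the patch avoids
 * paying in page_add_new_anon_rmap().
 */
static void sketch_irqsafe_mod_zone_page_state(struct zone *zone,
					       enum zone_stat_item item,
					       long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);	/* cheap, non-atomic update */
	local_irq_restore(flags);
}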
mm/internal.h
@@ -188,31 +188,6 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
 	munlock_vma_pages_range(vma, vma->vm_start, vma->vm_end);
 }
 
-/*
- * Called only in fault path, to determine if a new page is being
- * mapped into a LOCKED vma.  If it is, mark page as mlocked.
- */
-static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
-				    struct page *page)
-{
-	VM_BUG_ON_PAGE(PageLRU(page), page);
-
-	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED))
-		return 0;
-
-	if (!TestSetPageMlocked(page)) {
-		/*
-		 * We use the irq-unsafe __mod_zone_page_stat because this
-		 * counter is not modified from interrupt context, and the pte
-		 * lock is held(spinlock), which implies preemption disabled.
-		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    hpage_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
-	}
-	return 1;
-}
-
 /*
  * must be called with vma's mmap_sem held for read or write, and page locked.
  */
@@ -255,10 +230,6 @@ extern unsigned long vma_address(struct page *page,
 				struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
-{
-	return 0;
-}
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
 static inline void mlock_migrate_page(struct page *new, struct page *old) { }
mm/rmap.c (20 changed lines)
@@ -1032,11 +1032,25 @@ void page_add_new_anon_rmap(struct page *page,
 	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
 			hpage_nr_pages(page));
 	__page_set_anon_rmap(page, vma, address, 1);
-	if (!mlocked_vma_newpage(vma, page)) {
+
+	VM_BUG_ON_PAGE(PageLRU(page), page);
+	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
 		SetPageActive(page);
 		lru_cache_add(page);
-	} else
-		add_page_to_unevictable_list(page);
+		return;
+	}
+
+	if (!TestSetPageMlocked(page)) {
+		/*
+		 * We use the irq-unsafe __mod_zone_page_stat because this
+		 * counter is not modified from interrupt context, and the pte
+		 * lock is held(spinlock), which implies preemption disabled.
+		 */
+		__mod_zone_page_state(page_zone(page), NR_MLOCK,
+				hpage_nr_pages(page));
+		count_vm_event(UNEVICTABLE_PGMLOCKED);
+	}
+	add_page_to_unevictable_list(page);
 }
 
 /**