mm: do not pass mm_struct into handle_mm_fault
We always have vma->vm_mm around.

Link: http://lkml.kernel.org/r/1466021202-61880-8-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Parent: 6fb8ddfc45
Commit: dcddffd41d
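The conversion at every call site is mechanical: the mm_struct argument is dropped and handle_mm_fault() fetches the mm from vma->vm_mm internally. A minimal sketch of what a caller looks like after the change (hypothetical function name, shown only to illustrate the signature change; it is not a file touched by this patch):

	/* Hypothetical caller: fault in one page given only the vma. */
	static int example_fault_in_page(struct vm_area_struct *vma,
					 unsigned long address, unsigned int flags)
	{
		int fault;

		/* before this patch: handle_mm_fault(vma->vm_mm, vma, address, flags) */
		fault = handle_mm_fault(vma, address, flags);

		if (fault & VM_FAULT_ERROR)
			return -EFAULT;
		return 0;
	}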
@@ -147,7 +147,7 @@ retry:
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault. */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -137,7 +137,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (unlikely(fatal_signal_pending(current))) {
@@ -243,7 +243,7 @@ good_area:
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, flags);
 
 check_stack:
 	/* Don't allow expansion below FIRST_USER_ADDRESS */
@@ -233,7 +233,7 @@ good_area:
 		goto out;
 	}
 
-	return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
+	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -134,7 +134,7 @@ good_area:
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -168,7 +168,7 @@ retry:
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -164,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, flags);
+	fault = handle_mm_fault(vma, ear0, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -101,7 +101,7 @@ good_area:
 		break;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -159,7 +159,7 @@ retry:
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -196,7 +196,7 @@ good_area:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, flags);
+	fault = handle_mm_fault(vma, addr, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -136,7 +136,7 @@ good_area:
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	pr_debug("handle_mm_fault returns %d\n", fault);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
@@ -133,7 +133,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
@@ -216,7 +216,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -153,7 +153,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -254,7 +254,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -131,7 +131,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -163,7 +163,7 @@ good_area:
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -239,7 +239,7 @@ good_area:
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -75,7 +75,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}
 
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
+	*flt = handle_mm_fault(vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
@@ -429,7 +429,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
 		if (fault & VM_FAULT_SIGSEGV)
 			goto bad_area;
@@ -456,7 +456,7 @@ retry:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	/* No reason to continue if interrupted by SIGKILL. */
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
 		fault = VM_FAULT_SIGNAL;
@@ -111,7 +111,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -487,7 +487,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
 		if (mm_fault_error(regs, error_code, address, fault))
@@ -241,7 +241,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -411,7 +411,7 @@ good_area:
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, flags)) {
+	switch (handle_mm_fault(vma, address, flags)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
@@ -436,7 +436,7 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		goto exit_exception;
@@ -434,7 +434,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return 0;
@@ -73,7 +73,7 @@ good_area:
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, flags);
+		fault = handle_mm_fault(vma, address, flags);
 
 		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 			goto out_nosemaphore;
@@ -194,7 +194,7 @@ good_area:
 	 * If for any reason at all we couldn't handle the fault, make
 	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+	fault = handle_mm_fault(vma, addr & PAGE_MASK, flags);
 	return fault;
 
 check_stack:
@@ -1353,7 +1353,7 @@ good_area:
 	 * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
 	 * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 	major |= fault & VM_FAULT_MAJOR;
 
 	/*
@@ -110,7 +110,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags);
 
 	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
 		return;
@@ -538,8 +538,7 @@ static void do_fault(struct work_struct *work)
 	if (access_error(vma, fault))
 		goto out;
 
-	ret = handle_mm_fault(mm, vma, address, flags);
-
+	ret = handle_mm_fault(vma, address, flags);
 out:
 	up_read(&mm->mmap_sem);
 
@@ -583,7 +583,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (access_error(vma, req))
 			goto invalid;
 
-		ret = handle_mm_fault(svm->mm, vma, address,
+		ret = handle_mm_fault(vma, address,
 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
 		if (ret & VM_FAULT_ERROR)
 			goto invalid;
@@ -1215,15 +1215,14 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
 int invalidate_inode_page(struct page *page);
 
 #ifdef CONFIG_MMU
-extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags);
+extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
 #else
-static inline int handle_mm_fault(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		unsigned int flags)
+static inline int handle_mm_fault(struct vm_area_struct *vma,
+		unsigned long address, unsigned int flags)
 {
 	/* should never happen if there's no MMU */
 	BUG();
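With the new prototype, code that reaches a fault through a VMA (GUP, KSM, and the IOMMU drivers above) no longer needs an mm_struct in scope at all. A hedged sketch of such a caller, using a hypothetical helper that write-faults a single address (the helper name and its error mapping are illustrative, not part of this patch):

	/* Illustrative only: write-fault one address given just the vma. */
	static int example_break_cow(struct vm_area_struct *vma, unsigned long addr)
	{
		int ret = handle_mm_fault(vma, addr, FAULT_FLAG_WRITE);

		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & VM_FAULT_ERROR)
			return -EFAULT;
		return 0;	/* the mm was implied by vma->vm_mm */
	}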
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -352,7 +352,6 @@ unmap:
 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		unsigned long address, unsigned int *flags, int *nonblocking)
 {
-	struct mm_struct *mm = vma->vm_mm;
 	unsigned int fault_flags = 0;
 	int ret;
 
@@ -377,7 +376,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		fault_flags |= FAULT_FLAG_TRIED;
 	}
 
-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
 			return -ENOMEM;
@@ -692,7 +691,7 @@ retry:
 	if (!vma_permits_fault(vma, fault_flags))
 		return -EFAULT;
 
-	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	ret = handle_mm_fault(vma, address, fault_flags);
 	major |= ret & VM_FAULT_MAJOR;
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,9 +376,8 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
 		if (IS_ERR_OR_NULL(page))
 			break;
 		if (PageKsm(page))
-			ret = handle_mm_fault(vma->vm_mm, vma, addr,
-							FAULT_FLAG_WRITE |
-							FAULT_FLAG_REMOTE);
+			ret = handle_mm_fault(vma, addr,
+					FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
 		else
 			ret = VM_FAULT_WRITE;
 		put_page(page);
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3420,9 +3420,10 @@ unlock:
  * The mmap_sem may have been released depending on flags and our
  * return value. See filemap_fault() and __lock_page_or_retry().
  */
-static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
@@ -3509,15 +3510,15 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  * The mmap_sem may have been released depending on flags and our
  * return value. See filemap_fault() and __lock_page_or_retry().
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-		unsigned long address, unsigned int flags)
+int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
+		unsigned int flags)
 {
 	int ret;
 
 	__set_current_state(TASK_RUNNING);
 
 	count_vm_event(PGFAULT);
-	mem_cgroup_count_vm_event(mm, PGFAULT);
+	mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);
 
 	/* do counter updates before entering really critical section. */
 	check_sync_rss_stat(current);
@@ -3529,7 +3530,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (flags & FAULT_FLAG_USER)
 		mem_cgroup_oom_enable();
 
-	ret = __handle_mm_fault(mm, vma, address, flags);
+	ret = __handle_mm_fault(vma, address, flags);
 
 	if (flags & FAULT_FLAG_USER) {
 		mem_cgroup_oom_disable();
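Read together, the mm/memory.c hunks leave the fault entry points looking roughly like this (a sketch assembled from the fragments above; the elided bodies are unchanged by this patch):

	static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
	{
		struct mm_struct *mm = vma->vm_mm;	/* derived here, no longer passed in */
		/* ... page-table walk as before ... */
	}

	int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
	{
		int ret;

		count_vm_event(PGFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGFAULT);
		/* ... */
		ret = __handle_mm_fault(vma, address, flags);
		/* ... */
		return ret;
	}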