arm64/mm: Convert to using lock_mm_and_find_vma()
This converts arm64 to use the new page fault helper. It was very
straightforward, but still needed a fix for the "obvious" conversion I
initially did. Thanks to Suren for the fix and testing.

Fixed-and-tested-by: Suren Baghdasaryan <surenb@google.com>
Unnecessary-code-removal-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: eda0047296
Commit: ae870a68b5
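For context, the helper this commit adopts comes from the generic series that
introduced it for x86. Its contract, as the arm64 conversion below relies on
it, is roughly the following (paraphrased; see mm/memory.c for the real
implementation):

	/*
	 * Declared in <linux/mm.h> when CONFIG_LOCK_MM_AND_FIND_VMA is set.
	 *
	 * On success: returns the VMA covering @address, with the mmap read
	 * lock held.  The helper itself performs the trylock/exception-table
	 * dance for kernel-mode faults and any VM_GROWSDOWN stack expansion,
	 * both of which arm64 used to open-code.
	 *
	 * On failure: returns NULL with the mmap lock already dropped, so
	 * the caller can report a bad mapping without unlocking.
	 */
	struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
						    unsigned long address,
						    struct pt_regs *regs);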
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -225,6 +225,7 @@ config ARM64
 	select IRQ_DOMAIN
 	select IRQ_FORCED_THREADING
 	select KASAN_VMALLOC if KASAN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE
 	select NEED_SG_DMA_LENGTH
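The new select works because the prerequisite series defines the opt-in
symbol in mm/Kconfig; sketched from that series (not part of this diff),
the definition is approximately:

	config LOCK_MM_AND_FIND_VMA
		bool
		depends on !STACK_GROWSUP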
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -483,27 +483,14 @@ static void do_bad_area(unsigned long far, unsigned long esr,
 #define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
 #define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
 
-static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+static vm_fault_t __do_page_fault(struct mm_struct *mm,
+				  struct vm_area_struct *vma, unsigned long addr,
 				  unsigned int mm_flags, unsigned long vm_flags,
 				  struct pt_regs *regs)
 {
-	struct vm_area_struct *vma = find_vma(mm, addr);
-
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
 	/*
 	 * Ok, we have a good vm_area for this memory access, so we can handle
 	 * it.
-	 */
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
 	 * Check that the permissions on the VMA allow for the fault which
 	 * occurred.
 	 */
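With the lookup hoisted into the caller, __do_page_fault() reduces to little
more than the permission check; a sketch of the post-patch shape (condensed,
not a verbatim quote):

	static vm_fault_t __do_page_fault(struct mm_struct *mm,
					  struct vm_area_struct *vma, unsigned long addr,
					  unsigned int mm_flags, unsigned long vm_flags,
					  struct pt_regs *regs)
	{
		/*
		 * The caller has already found, validated and (if needed)
		 * expanded the VMA via lock_mm_and_find_vma(), so only the
		 * access-permission check against vm_flags remains here.
		 */
		if (!(vma->vm_flags & vm_flags))
			return VM_FAULT_BADACCESS;
		return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
	}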
@@ -617,31 +604,15 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	}
 lock_mmap:
 #endif /* CONFIG_PER_VMA_LOCK */
-	/*
-	 * As per x86, we may deadlock here. However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->pc))
-			goto no_context;
+
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above mmap_read_trylock() might have succeeded in which
-		 * case, we'll have missed the might_sleep() from down_read().
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
-			mmap_read_unlock(mm);
-			goto no_context;
-		}
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto done;
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
+	fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -660,9 +631,7 @@ retry:
 	}
 	mmap_read_unlock(mm);
 
-#ifdef CONFIG_PER_VMA_LOCK
 done:
-#endif
 	/*
 	 * Handle the "normal" (no error) case first.
 	 */
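Note the effect of the last hunk: because the mmap-lock path can now reach
done: as well (when lock_mm_and_find_vma() fails), the label is no longer
exclusive to the CONFIG_PER_VMA_LOCK fast path and its #ifdef guards can go.
Condensed, the converted slow path now reads approximately (surrounding
signal/retry handling elided):

	retry:
		vma = lock_mm_and_find_vma(mm, addr, regs);
		if (unlikely(!vma)) {
			/* The helper already dropped the mmap lock. */
			fault = VM_FAULT_BADMAP;
			goto done;
		}

		fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs);
		/* ... signal/retry handling ... */
		mmap_read_unlock(mm);
	done:
		/* now shared by the per-VMA and mmap-lock paths */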