mm: take i_mmap_lock in unmap_mapping_range() for DAX
DAX is not so special: we need i_mmap_lock to protect mapping->i_mmap.

__dax_pmd_fault() uses unmap_mapping_range() to shoot out the zero page
from all mappings. We need to drop i_mmap_lock there to avoid a lock
deadlock. Re-acquiring the lock should be fine since we check i_size
after that point.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 3fdd1b479d
Commit: 46c043ede4
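The deadlock the message refers to is plain lock recursion: after this change, unmap_mapping_range() always takes i_mmap_lock for writing, while __dax_pmd_fault() is entered with that same lock already write-held, and a write-held rwsem cannot be re-taken by its owner. Below is a minimal userspace sketch of the drop/call/re-take pattern the fault path now follows. It is an illustration, not kernel code: fault_handler() and shoot_out_zero_pages() are hypothetical stand-ins for __dax_pmd_fault() and unmap_mapping_range(), and a pthread rwlock stands in for the i_mmap rwsem.

/*
 * Sketch of the locking pattern established by this commit.
 * Assumptions: pthread rwlock models the non-recursive i_mmap rwsem;
 * helper names are invented for illustration only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t i_mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Like unmap_mapping_range() after this patch: takes the lock itself. */
static void shoot_out_zero_pages(void)
{
	pthread_rwlock_wrlock(&i_mmap_lock);
	/* ... walk mapping->i_mmap and zap the zero pages ... */
	pthread_rwlock_unlock(&i_mmap_lock);
}

/* Like __dax_pmd_fault(): runs with the lock already write-held. */
static void fault_handler(void)
{
	pthread_rwlock_wrlock(&i_mmap_lock);

	/*
	 * Calling shoot_out_zero_pages() with the lock held would
	 * self-deadlock, since a write-held rwlock is not recursive.
	 * So drop the lock, make the call, and re-take it.
	 */
	pthread_rwlock_unlock(&i_mmap_lock);
	shoot_out_zero_pages();
	pthread_rwlock_wrlock(&i_mmap_lock);

	/*
	 * Truncate may have run while the lock was dropped, which is
	 * why the kernel code checks i_size after this point before
	 * installing the PMD entry.
	 */
	pthread_rwlock_unlock(&i_mmap_lock);
}

int main(void)
{
	fault_handler();	/* completes; the naive version would hang */
	puts("fault handled without deadlock");
	return 0;
}

Re-acquiring is only safe because of that revalidation step, which matches the reordering in the fs/dax.c hunks below: the unmap now happens before the i_size/truncate check.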
 fs/dax.c    | 35 +++++++++++++++++++++++----------------
 mm/memory.c | 11 ++---------
 2 files changed, 21 insertions(+), 25 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -554,6 +554,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
 		goto fallback;
 
+	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+		int i;
+		for (i = 0; i < PTRS_PER_PMD; i++)
+			clear_page(kaddr + i * PAGE_SIZE);
+		count_vm_event(PGMAJFAULT);
+		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+		result |= VM_FAULT_MAJOR;
+	}
+
+	/*
+	 * If we allocated new storage, make sure no process has any
+	 * zero pages covering this hole
+	 */
+	if (buffer_new(&bh)) {
+		i_mmap_unlock_write(mapping);
+		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
+		i_mmap_lock_write(mapping);
+	}
+
 	/*
 	 * If a truncate happened while we were allocating blocks, we may
 	 * leave blocks allocated to the file that are beyond EOF.  We can't
@@ -568,13 +587,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	if ((pgoff | PG_PMD_COLOUR) >= size)
 		goto fallback;
 
-	/*
-	 * If we allocated new storage, make sure no process has any
-	 * zero pages covering this hole
-	 */
-	if (buffer_new(&bh))
-		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
-
 	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
 		spinlock_t *ptl;
 		pmd_t entry;
@@ -605,15 +617,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
 			goto fallback;
 
-		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-			int i;
-			for (i = 0; i < PTRS_PER_PMD; i++)
-				clear_page(kaddr + i * PAGE_SIZE);
-			count_vm_event(PGMAJFAULT);
-			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-			result |= VM_FAULT_MAJOR;
-		}
-
 		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
 	}
 
diff --git a/mm/memory.c b/mm/memory.c
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2426,17 +2426,10 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
 
-
-	/*
-	 * DAX already holds i_mmap_lock to serialise file truncate vs
-	 * page fault and page fault vs page fault.
-	 */
-	if (!IS_DAX(mapping->host))
-		i_mmap_lock_write(mapping);
+	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	if (!IS_DAX(mapping->host))
-		i_mmap_unlock_write(mapping);
+	i_mmap_unlock_write(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);