dax: Finish fault completely when loading holes
The only case when we do not finish the page fault completely is when we
are loading hole pages into a radix tree. Avoid this special case and
finish the fault in that case as well inside the DAX fault handler. It
will allow for easier iomap handling.

Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
Родитель
e3fce68cdb
Коммит
f449b936f1
27
fs/dax.c
27
fs/dax.c
|
@@ -539,15 +539,16 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
|
|||
* otherwise it will simply fall out of the page cache under memory
|
||||
* pressure without ever having been dirtied.
|
||||
*/
|
||||
static int dax_load_hole(struct address_space *mapping, void *entry,
|
||||
static int dax_load_hole(struct address_space *mapping, void **entry,
|
||||
struct vm_fault *vmf)
|
||||
{
|
||||
struct page *page;
|
||||
int ret;
|
||||
|
||||
/* Hole page already exists? Return it... */
|
||||
if (!radix_tree_exceptional_entry(entry)) {
|
||||
vmf->page = entry;
|
||||
return VM_FAULT_LOCKED;
|
||||
if (!radix_tree_exceptional_entry(*entry)) {
|
||||
page = *entry;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* This will replace locked radix tree entry with a hole page */
|
||||
|
@@ -555,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
|
|||
vmf->gfp_mask | __GFP_ZERO);
|
||||
if (!page)
|
||||
return VM_FAULT_OOM;
|
||||
out:
|
||||
vmf->page = page;
|
||||
return VM_FAULT_LOCKED;
|
||||
ret = finish_fault(vmf);
|
||||
vmf->page = NULL;
|
||||
*entry = page;
|
||||
if (!ret) {
|
||||
/* Grab reference for PTE that is now referencing the page */
|
||||
get_page(page);
|
||||
return VM_FAULT_NOPAGE;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
|
||||
|
@@ -1163,8 +1173,8 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
|
|||
case IOMAP_UNWRITTEN:
|
||||
case IOMAP_HOLE:
|
||||
if (!(vmf->flags & FAULT_FLAG_WRITE)) {
|
||||
vmf_ret = dax_load_hole(mapping, entry, vmf);
|
||||
break;
|
||||
vmf_ret = dax_load_hole(mapping, &entry, vmf);
|
||||
goto finish_iomap;
|
||||
}
|
||||
/*FALLTHRU*/
|
||||
default:
|
||||
|
@@ -1185,8 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
|
|||
}
|
||||
}
|
||||
unlock_entry:
|
||||
if (vmf_ret != VM_FAULT_LOCKED || error)
|
||||
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
|
||||
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
|
||||
out:
|
||||
if (error == -ENOMEM)
|
||||
return VM_FAULT_OOM | major;
|
||||
|
|
Загрузка…
Ссылка в новой задаче