dax: dax_iomap_fault() needs to call iomap_end()

Currently iomap_end() doesn't do anything for DAX page faults in either ext2
or XFS: ext2_iomap_end() just checks for a write underrun, and
xfs_file_iomap_end() checks whether it needs to finish a delayed allocation.
However, in the future iomap_end() calls might be needed to make sure we have
balanced allocations, locks, etc.  So, add calls to iomap_end() with
appropriate error handling to dax_iomap_fault().
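For reference, the shape of the pairing this patch establishes is sketched
below.  This is only an illustrative sketch, not code from the patch: the
iomap_begin()/iomap_end() signatures follow struct iomap_ops as used in the
diff, while dax_fault_work() is a hypothetical stand-in for the actual
mapping-insertion logic in dax_iomap_fault().

	/*
	 * Illustrative only: every successful ops->iomap_begin() is
	 * balanced by an ops->iomap_end() call, even on error paths.
	 * dax_fault_work() is a hypothetical placeholder.
	 */
	static int dax_fault_balanced(struct inode *inode, loff_t pos,
				      unsigned flags, struct iomap_ops *ops)
	{
		struct iomap iomap = { 0 };
		int error;

		error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
		if (error)
			return error;

		error = dax_fault_work(inode, pos, &iomap);

		if (ops->iomap_end) {
			if (error)
				/* keep the fault error; report 0 bytes written */
				ops->iomap_end(inode, pos, PAGE_SIZE, 0,
						flags, &iomap);
			else
				error = ops->iomap_end(inode, pos, PAGE_SIZE,
						PAGE_SIZE, flags, &iomap);
		}
		return error;
	}

On failure the original error is kept and zero bytes are reported written, so
iomap_end() can tear down anything set up by iomap_begin() without clobbering
the fault's error code.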

Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Suggested-by: Jan Kara <jack@suse.cz>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Dave Chinner <david@fromorbit.com>
Ross Zwisler 2016-11-08 11:33:26 +11:00 committed by Dave Chinner
Parent 333ccc978e
Commit 1550290b08
1 changed file with 29 additions and 8 deletions


@@ -1165,6 +1165,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 	struct iomap iomap = { 0 };
 	unsigned flags = 0;
 	int error, major = 0;
+	int locked_status = 0;
 	void *entry;
 
 	/*
@@ -1194,7 +1195,7 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		goto unlock_entry;
 	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
 		error = -EIO;		/* fs corruption? */
-		goto unlock_entry;
+		goto finish_iomap;
 	}
 
 	sector = dax_iomap_sector(&iomap, pos);
@@ -1216,13 +1217,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		}
 
 		if (error)
-			goto unlock_entry;
+			goto finish_iomap;
 		if (!radix_tree_exceptional_entry(entry)) {
 			vmf->page = entry;
-			return VM_FAULT_LOCKED;
+			locked_status = VM_FAULT_LOCKED;
+		} else {
+			vmf->entry = entry;
+			locked_status = VM_FAULT_DAX_LOCKED;
 		}
-		vmf->entry = entry;
-		return VM_FAULT_DAX_LOCKED;
+		goto finish_iomap;
 	}
 
 	switch (iomap.type) {
@@ -1237,8 +1240,10 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		break;
 	case IOMAP_UNWRITTEN:
 	case IOMAP_HOLE:
-		if (!(vmf->flags & FAULT_FLAG_WRITE))
-			return dax_load_hole(mapping, entry, vmf);
+		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
+			locked_status = dax_load_hole(mapping, entry, vmf);
+			break;
+		}
 		/*FALLTHRU*/
 	default:
 		WARN_ON_ONCE(1);
@@ -1246,14 +1251,30 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		break;
 	}
 
+ finish_iomap:
+	if (ops->iomap_end) {
+		if (error) {
+			/* keep previous error */
+			ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
+					&iomap);
+		} else {
+			error = ops->iomap_end(inode, pos, PAGE_SIZE,
+					PAGE_SIZE, flags, &iomap);
+		}
+	}
  unlock_entry:
-	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
+	if (!locked_status || error)
+		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
 	/* -EBUSY is fine, somebody else faulted on the same PTE */
 	if (error < 0 && error != -EBUSY)
 		return VM_FAULT_SIGBUS | major;
+	if (locked_status) {
+		WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
+		return locked_status;
+	}
 	return VM_FAULT_NOPAGE | major;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_fault);