iomap: Add done_before argument to iomap_dio_rw
commit 4fdccaa0d1 upstream
Add a done_before argument to iomap_dio_rw that indicates how much of
the request has already been transferred. When the request succeeds, we
report that done_before additional bytes were transferred. This is
useful for finishing a request asynchronously when part of the request
has already been completed synchronously.
We'll use that to allow iomap_dio_rw to be used with page faults
disabled: when a page fault occurs while submitting a request, we
synchronously complete the part of the request that has already been
submitted. The caller can then take care of the page fault and call
iomap_dio_rw again for the rest of the request, passing in the number of
bytes already transferred.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: ea7a578588
Commit: d3b744791b
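To make the calling convention concrete before the diff: below is a minimal, illustrative sketch of the resume pattern the commit message describes, for the read side. It is not part of this patch; the IOMAP_DIO_PARTIAL flag and the fault_in_iov_iter_writeable() helper come from other patches in this series, and locking and ki_pos handling are elided.

/*
 * Sketch only -- not part of this patch.  Assumes IOMAP_DIO_PARTIAL and
 * fault_in_iov_iter_writeable() from elsewhere in this patch series.
 */
static ssize_t dio_read_resumable(struct kiocb *iocb, struct iov_iter *to,
                                  const struct iomap_ops *ops)
{
        size_t done_before = 0;  /* bytes completed by earlier attempts */
        ssize_t ret;

retry:
        pagefault_disable();
        ret = iomap_dio_rw(iocb, to, ops, NULL, IOMAP_DIO_PARTIAL,
                           done_before);
        pagefault_enable();

        if (ret > 0 && iov_iter_count(to) > 0) {
                /*
                 * Partial result: the transfer stopped at a non-resident
                 * page.  iomap_dio_complete() already added done_before,
                 * so ret is cumulative; carry it into the next attempt.
                 */
                done_before = ret;
                /* Returns the number of bytes it could not fault in. */
                if (!fault_in_iov_iter_writeable(to, iov_iter_count(to)))
                        goto retry;
        }
        return ret;
}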
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
@@ -1956,7 +1956,7 @@ relock:
         }
 
         dio = __iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
-                             0);
+                             0, 0);
 
         btrfs_inode_unlock(inode, ilock_flags);
 
@@ -3668,7 +3668,8 @@ static ssize_t btrfs_direct_read(struct kiocb *iocb, struct iov_iter *to)
                 return 0;
 
         btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED);
-        ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops, 0);
+        ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
+                           0, 0);
         btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
         return ret;
 }
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
@@ -287,7 +287,7 @@ static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 
                 if (!err)
                         return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
-                                            NULL, 0);
+                                            NULL, 0, 0);
                 if (err < 0)
                         return err;
         }
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
@@ -74,7 +74,7 @@ static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
                 return generic_file_read_iter(iocb, to);
         }
 
-        ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0);
+        ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL, 0, 0);
         inode_unlock_shared(inode);
 
         file_accessed(iocb->ki_filp);
@@ -566,7 +566,8 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
         if (ilock_shared)
                 iomap_ops = &ext4_iomap_overwrite_ops;
         ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
-                           (unaligned_io || extend) ? IOMAP_DIO_FORCE_WAIT : 0);
+                           (unaligned_io || extend) ? IOMAP_DIO_FORCE_WAIT : 0,
+                           0);
         if (ret == -ENOTBLK)
                 ret = 0;
 
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
@@ -823,7 +823,7 @@ static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to,
         if (ret)
                 goto out_uninit;
 
-        ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL, 0);
+        ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL, 0, 0);
         gfs2_glock_dq(gh);
 out_uninit:
         gfs2_holder_uninit(gh);
@@ -857,7 +857,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
         if (offset + len > i_size_read(&ip->i_inode))
                 goto out;
 
-        ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL, 0);
+        ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL, 0, 0);
         if (ret == -ENOTBLK)
                 ret = 0;
 out:
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
@@ -31,6 +31,7 @@ struct iomap_dio {
         atomic_t                ref;
         unsigned                flags;
         int                     error;
+        size_t                  done_before;
         bool                    wait_for_completion;
 
         union {
@@ -124,6 +125,9 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio)
         if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
                 ret = generic_write_sync(iocb, ret);
 
+        if (ret > 0)
+                ret += dio->done_before;
+
         kfree(dio);
 
         return ret;
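A quick worked example of the accumulation added to iomap_dio_complete() above, with illustrative byte counts (not taken from the patch):

/*
 * Illustrative numbers: a 12288-byte read hits a non-resident page
 * after transferring 4096 bytes, so the first call returns 4096.
 * The caller faults the missing pages in and resumes with
 * done_before = 4096.  If the resumed dio transfers the remaining
 * 8192 bytes, iomap_dio_complete() returns 8192 + 4096 = 12288, so
 * the caller sees one result covering the whole request, even when
 * the resumed part completes asynchronously.
 */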
@@ -450,13 +454,21 @@ static loff_t iomap_dio_iter(const struct iomap_iter *iter,
  * may be pure data writes. In that case, we still need to do a full data sync
  * completion.
  *
+ * When page faults are disabled and @dio_flags includes IOMAP_DIO_PARTIAL,
+ * __iomap_dio_rw can return a partial result if it encounters a non-resident
+ * page in @iter after preparing a transfer.  In that case, the non-resident
+ * pages can be faulted in and the request resumed with @done_before set to the
+ * number of bytes previously transferred.  The request will then complete with
+ * the correct total number of bytes transferred; this is essential for
+ * completing partial requests asynchronously.
+ *
  * Returns -ENOTBLK In case of a page invalidation invalidation failure for
  * writes.  The callers needs to fall back to buffered I/O in this case.
  */
 struct iomap_dio *
 __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                 const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
-                unsigned int dio_flags)
+                unsigned int dio_flags, size_t done_before)
 {
         struct address_space *mapping = iocb->ki_filp->f_mapping;
         struct inode *inode = file_inode(iocb->ki_filp);
@@ -486,6 +498,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
         dio->dops = dops;
         dio->error = 0;
         dio->flags = 0;
+        dio->done_before = done_before;
 
         dio->submit.iter = iter;
         dio->submit.waiter = current;
@@ -652,11 +665,11 @@ EXPORT_SYMBOL_GPL(__iomap_dio_rw);
 ssize_t
 iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                 const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
-                unsigned int dio_flags)
+                unsigned int dio_flags, size_t done_before)
 {
         struct iomap_dio *dio;
 
-        dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags);
+        dio = __iomap_dio_rw(iocb, iter, ops, dops, dio_flags, done_before);
         if (IS_ERR_OR_NULL(dio))
                 return PTR_ERR_OR_ZERO(dio);
         return iomap_dio_complete(dio);
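The doc comment above distinguishes two short outcomes a caller may see: -ENOTBLK, which requires a buffered-I/O fallback, and a positive partial result under IOMAP_DIO_PARTIAL, which can be resumed. As a hedged illustration (again not part of this patch; fault_in_iov_iter_readable() is assumed from elsewhere in the series), a write-side caller might separate the two like this:

/* Sketch only: separate buffered fallback from fault-in-and-resume. */
static ssize_t dio_write_resumable(struct kiocb *iocb, struct iov_iter *from,
                                   const struct iomap_ops *ops,
                                   const struct iomap_dio_ops *dops)
{
        size_t done_before = 0;
        ssize_t ret;

retry:
        pagefault_disable();
        ret = iomap_dio_rw(iocb, from, ops, dops, IOMAP_DIO_PARTIAL,
                           done_before);
        pagefault_enable();

        if (ret == -ENOTBLK)
                return ret;     /* page invalidation failed: the caller
                                 * must fall back to buffered I/O */
        if (ret > 0 && iov_iter_count(from) > 0) {
                done_before = ret;      /* ret is already cumulative */
                if (!fault_in_iov_iter_readable(from, iov_iter_count(from)))
                        goto retry;
        }
        return ret;
}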
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
@@ -259,7 +259,7 @@ xfs_file_dio_read(
         ret = xfs_ilock_iocb(iocb, XFS_IOLOCK_SHARED);
         if (ret)
                 return ret;
-        ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0);
+        ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL, 0, 0);
         xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
         return ret;
@@ -569,7 +569,7 @@ xfs_file_dio_write_aligned(
         }
         trace_xfs_file_direct_write(iocb, from);
         ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
-                           &xfs_dio_write_ops, 0);
+                           &xfs_dio_write_ops, 0, 0);
 out_unlock:
         if (iolock)
                 xfs_iunlock(ip, iolock);
@@ -647,7 +647,7 @@ retry_exclusive:
 
         trace_xfs_file_direct_write(iocb, from);
         ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
-                           &xfs_dio_write_ops, flags);
+                           &xfs_dio_write_ops, flags, 0);
 
         /*
          * Retry unaligned I/O with exclusive blocking semantics if the DIO
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
@@ -852,7 +852,7 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
                 ret = zonefs_file_dio_append(iocb, from);
         else
                 ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
-                                   &zonefs_write_dio_ops, 0);
+                                   &zonefs_write_dio_ops, 0, 0);
         if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
             (ret > 0 || ret == -EIOCBQUEUED)) {
                 if (ret > 0)
@@ -987,7 +987,7 @@ static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
         }
                 file_accessed(iocb->ki_filp);
                 ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
-                                   &zonefs_read_dio_ops, 0);
+                                   &zonefs_read_dio_ops, 0, 0);
         } else {
                 ret = generic_file_read_iter(iocb, to);
                 if (ret == -EIO)
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
@@ -339,10 +339,10 @@ struct iomap_dio_ops {
 
 ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                 const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
-                unsigned int dio_flags);
+                unsigned int dio_flags, size_t done_before);
 struct iomap_dio *__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                 const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
-                unsigned int dio_flags);
+                unsigned int dio_flags, size_t done_before);
 ssize_t iomap_dio_complete(struct iomap_dio *dio);
 int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
 