xfs: split the iomap ops for buffered vs direct writes
Instead of lots of magic conditionals in the main write_begin handler this makes the intent very clear. Things will become even better once we support delayed allocations for extent size hints and realtime allocations. Signed-off-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com> Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
This commit is contained in:
Родитель
a526c85c22
Коммит
f150b42343
|
@ -1113,7 +1113,8 @@ xfs_free_file_space(
|
|||
return 0;
|
||||
if (offset + len > XFS_ISIZE(ip))
|
||||
len = XFS_ISIZE(ip) - offset;
|
||||
error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
|
||||
error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
if (error)
|
||||
return error;
|
||||
|
||||
|
|
|
@ -352,7 +352,7 @@ restart:
|
|||
|
||||
trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
|
||||
error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
|
||||
NULL, &xfs_iomap_ops);
|
||||
NULL, &xfs_buffered_write_iomap_ops);
|
||||
if (error)
|
||||
return error;
|
||||
} else
|
||||
|
@ -552,7 +552,8 @@ xfs_file_dio_aio_write(
|
|||
* If unaligned, this is the only IO in-flight. Wait on it before we
|
||||
* release the iolock to prevent subsequent overlapping IO.
|
||||
*/
|
||||
ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, &xfs_dio_write_ops,
|
||||
ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
|
||||
&xfs_dio_write_ops,
|
||||
is_sync_kiocb(iocb) || unaligned_io);
|
||||
out:
|
||||
xfs_iunlock(ip, iolock);
|
||||
|
@ -592,7 +593,7 @@ xfs_file_dax_write(
|
|||
count = iov_iter_count(from);
|
||||
|
||||
trace_xfs_file_dax_write(ip, count, pos);
|
||||
ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
|
||||
ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
|
||||
if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
|
||||
i_size_write(inode, iocb->ki_pos);
|
||||
error = xfs_setfilesize(ip, pos, ret);
|
||||
|
@ -639,7 +640,8 @@ write_retry:
|
|||
current->backing_dev_info = inode_to_bdi(inode);
|
||||
|
||||
trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
|
||||
ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
|
||||
ret = iomap_file_buffered_write(iocb, from,
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
if (likely(ret >= 0))
|
||||
iocb->ki_pos += ret;
|
||||
|
||||
|
@ -1156,12 +1158,14 @@ __xfs_filemap_fault(
|
|||
|
||||
ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
|
||||
(write_fault && !vmf->cow_page) ?
|
||||
&xfs_iomap_ops : &xfs_read_iomap_ops);
|
||||
&xfs_direct_write_iomap_ops :
|
||||
&xfs_read_iomap_ops);
|
||||
if (ret & VM_FAULT_NEEDDSYNC)
|
||||
ret = dax_finish_sync_fault(vmf, pe_size, pfn);
|
||||
} else {
|
||||
if (write_fault)
|
||||
ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
|
||||
ret = iomap_page_mkwrite(vmf,
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
else
|
||||
ret = filemap_fault(vmf);
|
||||
}
|
||||
|
|
|
@ -719,16 +719,7 @@ relock:
|
|||
}
|
||||
|
||||
static int
|
||||
xfs_file_iomap_begin_delay(
|
||||
struct inode *inode,
|
||||
loff_t offset,
|
||||
loff_t count,
|
||||
unsigned flags,
|
||||
struct iomap *iomap,
|
||||
struct iomap *srcmap);
|
||||
|
||||
static int
|
||||
xfs_file_iomap_begin(
|
||||
xfs_direct_write_iomap_begin(
|
||||
struct inode *inode,
|
||||
loff_t offset,
|
||||
loff_t length,
|
||||
|
@ -751,13 +742,6 @@ xfs_file_iomap_begin(
|
|||
if (XFS_FORCED_SHUTDOWN(mp))
|
||||
return -EIO;
|
||||
|
||||
if (!(flags & IOMAP_DIRECT) && !IS_DAX(inode) &&
|
||||
!xfs_get_extsz_hint(ip)) {
|
||||
/* Reserve delalloc blocks for regular writeback. */
|
||||
return xfs_file_iomap_begin_delay(inode, offset, length, flags,
|
||||
iomap, srcmap);
|
||||
}
|
||||
|
||||
/*
|
||||
* Lock the inode in the manner required for the specified operation and
|
||||
* check for as many conditions that would result in blocking as
|
||||
|
@ -864,8 +848,12 @@ out_unlock:
|
|||
return error;
|
||||
}
|
||||
|
||||
const struct iomap_ops xfs_direct_write_iomap_ops = {
|
||||
.iomap_begin = xfs_direct_write_iomap_begin,
|
||||
};
|
||||
|
||||
static int
|
||||
xfs_file_iomap_begin_delay(
|
||||
xfs_buffered_write_iomap_begin(
|
||||
struct inode *inode,
|
||||
loff_t offset,
|
||||
loff_t count,
|
||||
|
@ -884,8 +872,12 @@ xfs_file_iomap_begin_delay(
|
|||
int whichfork = XFS_DATA_FORK;
|
||||
int error = 0;
|
||||
|
||||
/* we can't use delayed allocations when using extent size hints */
|
||||
if (xfs_get_extsz_hint(ip))
|
||||
return xfs_direct_write_iomap_begin(inode, offset, count,
|
||||
flags, iomap, srcmap);
|
||||
|
||||
ASSERT(!XFS_IS_REALTIME_INODE(ip));
|
||||
ASSERT(!xfs_get_extsz_hint(ip));
|
||||
|
||||
xfs_ilock(ip, XFS_ILOCK_EXCL);
|
||||
|
||||
|
@ -1077,18 +1069,23 @@ out_unlock:
|
|||
}
|
||||
|
||||
static int
|
||||
xfs_file_iomap_end_delalloc(
|
||||
struct xfs_inode *ip,
|
||||
xfs_buffered_write_iomap_end(
|
||||
struct inode *inode,
|
||||
loff_t offset,
|
||||
loff_t length,
|
||||
ssize_t written,
|
||||
unsigned flags,
|
||||
struct iomap *iomap)
|
||||
{
|
||||
struct xfs_inode *ip = XFS_I(inode);
|
||||
struct xfs_mount *mp = ip->i_mount;
|
||||
xfs_fileoff_t start_fsb;
|
||||
xfs_fileoff_t end_fsb;
|
||||
int error = 0;
|
||||
|
||||
if (iomap->type != IOMAP_DELALLOC)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Behave as if the write failed if drop writes is enabled. Set the NEW
|
||||
* flag to force delalloc cleanup.
|
||||
|
@ -1133,25 +1130,9 @@ xfs_file_iomap_end_delalloc(
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
xfs_file_iomap_end(
|
||||
struct inode *inode,
|
||||
loff_t offset,
|
||||
loff_t length,
|
||||
ssize_t written,
|
||||
unsigned flags,
|
||||
struct iomap *iomap)
|
||||
{
|
||||
if ((flags & (IOMAP_WRITE | IOMAP_ZERO)) &&
|
||||
iomap->type == IOMAP_DELALLOC)
|
||||
return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
|
||||
length, written, iomap);
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct iomap_ops xfs_iomap_ops = {
|
||||
.iomap_begin = xfs_file_iomap_begin,
|
||||
.iomap_end = xfs_file_iomap_end,
|
||||
const struct iomap_ops xfs_buffered_write_iomap_ops = {
|
||||
.iomap_begin = xfs_buffered_write_iomap_begin,
|
||||
.iomap_end = xfs_buffered_write_iomap_end,
|
||||
};
|
||||
|
||||
static int
|
||||
|
|
|
@ -39,7 +39,8 @@ xfs_aligned_fsb_count(
|
|||
return count_fsb;
|
||||
}
|
||||
|
||||
extern const struct iomap_ops xfs_iomap_ops;
|
||||
extern const struct iomap_ops xfs_buffered_write_iomap_ops;
|
||||
extern const struct iomap_ops xfs_direct_write_iomap_ops;
|
||||
extern const struct iomap_ops xfs_read_iomap_ops;
|
||||
extern const struct iomap_ops xfs_seek_iomap_ops;
|
||||
extern const struct iomap_ops xfs_xattr_iomap_ops;
|
||||
|
|
|
@ -883,10 +883,10 @@ xfs_setattr_size(
|
|||
if (newsize > oldsize) {
|
||||
trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
|
||||
error = iomap_zero_range(inode, oldsize, newsize - oldsize,
|
||||
&did_zeroing, &xfs_iomap_ops);
|
||||
&did_zeroing, &xfs_buffered_write_iomap_ops);
|
||||
} else {
|
||||
error = iomap_truncate_page(inode, newsize, &did_zeroing,
|
||||
&xfs_iomap_ops);
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
}
|
||||
|
||||
if (error)
|
||||
|
|
|
@ -1270,7 +1270,7 @@ xfs_reflink_zero_posteof(
|
|||
|
||||
trace_xfs_zero_eof(ip, isize, pos - isize);
|
||||
return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
|
||||
&xfs_iomap_ops);
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1527,7 +1527,8 @@ xfs_reflink_unshare(
|
|||
|
||||
inode_dio_wait(inode);
|
||||
|
||||
error = iomap_file_unshare(inode, offset, len, &xfs_iomap_ops);
|
||||
error = iomap_file_unshare(inode, offset, len,
|
||||
&xfs_buffered_write_iomap_ops);
|
||||
if (error)
|
||||
goto out;
|
||||
error = filemap_write_and_wait(inode->i_mapping);
|
||||
|
|
Загрузка…
Ссылка в новой задаче