xfs: serialise unaligned direct IOs
When two concurrent unaligned, non-overlapping direct IOs are issued to the same block, the direct IO layer will race to zero the block. The result is that one of the concurrent IOs will overwrite data written by the other IO with zeros. This is demonstrated by the xfsqa test 240. To avoid this problem, serialise all unaligned direct IOs to an inode with a big hammer. We need a big hammer approach as we need to serialise AIO as well, so we can't just block writes on locks. Hence, the big hammer is calling xfs_ioend_wait() while holding out other unaligned direct IOs from starting. We don't bother trying to serialise aligned vs unaligned IOs as they are overlapping IO and the result of concurrent overlapping IOs is undefined - the result of either IO is a valid result so we let them race. Hence we only penalise unaligned IO, which already has a major overhead compared to aligned IO so this isn't a major problem. Signed-off-by: Dave Chinner <dchinner@redhat.com> Reviewed-by: Alex Elder <aelder@sgi.com> Reviewed-by: Christoph Hellwig <hch@lst.de>
This commit is contained in:
Родитель
4d8d15812f
Коммит
eda7798272
|
@ -684,9 +684,24 @@ xfs_file_aio_write_checks(
|
||||||
* xfs_file_dio_aio_write - handle direct IO writes
|
* xfs_file_dio_aio_write - handle direct IO writes
|
||||||
*
|
*
|
||||||
* Lock the inode appropriately to prepare for and issue a direct IO write.
|
* Lock the inode appropriately to prepare for and issue a direct IO write.
|
||||||
* By spearating it from the buffered write path we remove all the tricky to
|
* By separating it from the buffered write path we remove all the tricky to
|
||||||
* follow locking changes and looping.
|
* follow locking changes and looping.
|
||||||
*
|
*
|
||||||
|
* If there are cached pages or we're extending the file, we need IOLOCK_EXCL
|
||||||
|
* until we're sure the bytes at the new EOF have been zeroed and/or the cached
|
||||||
|
* pages are flushed out.
|
||||||
|
*
|
||||||
|
* In most cases the direct IO writes will be done holding IOLOCK_SHARED
|
||||||
|
* allowing them to be done in parallel with reads and other direct IO writes.
|
||||||
|
* However, if the IO is not aligned to filesystem blocks, the direct IO layer
|
||||||
|
* needs to do sub-block zeroing and that requires serialisation against other
|
||||||
|
* direct IOs to the same block. In this case we need to serialise the
|
||||||
|
* submission of the unaligned IOs so that we don't get racing block zeroing in
|
||||||
|
* the dio layer. To avoid the problem with aio, we also need to wait for
|
||||||
|
* outstanding IOs to complete so that unwritten extent conversion is completed
|
||||||
|
* before we try to map the overlapping block. This is currently implemented by
|
||||||
|
* hitting it with a big hammer (i.e. xfs_ioend_wait()).
|
||||||
|
*
|
||||||
* Returns with locks held indicated by @iolock and errors indicated by
|
* Returns with locks held indicated by @iolock and errors indicated by
|
||||||
* negative return values.
|
* negative return values.
|
||||||
*/
|
*/
|
||||||
|
@ -706,6 +721,7 @@ xfs_file_dio_aio_write(
|
||||||
struct xfs_mount *mp = ip->i_mount;
|
struct xfs_mount *mp = ip->i_mount;
|
||||||
ssize_t ret = 0;
|
ssize_t ret = 0;
|
||||||
size_t count = ocount;
|
size_t count = ocount;
|
||||||
|
int unaligned_io = 0;
|
||||||
struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
|
struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ?
|
||||||
mp->m_rtdev_targp : mp->m_ddev_targp;
|
mp->m_rtdev_targp : mp->m_ddev_targp;
|
||||||
|
|
||||||
|
@ -713,13 +729,10 @@ xfs_file_dio_aio_write(
|
||||||
if ((pos & target->bt_smask) || (count & target->bt_smask))
|
if ((pos & target->bt_smask) || (count & target->bt_smask))
|
||||||
return -XFS_ERROR(EINVAL);
|
return -XFS_ERROR(EINVAL);
|
||||||
|
|
||||||
/*
|
if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
|
||||||
* For direct I/O, if there are cached pages or we're extending
|
unaligned_io = 1;
|
||||||
* the file, we need IOLOCK_EXCL until we're sure the bytes at
|
|
||||||
* the new EOF have been zeroed and/or the cached pages are
|
if (unaligned_io || mapping->nrpages || pos > ip->i_size)
|
||||||
* flushed out.
|
|
||||||
*/
|
|
||||||
if (mapping->nrpages || pos > ip->i_size)
|
|
||||||
*iolock = XFS_IOLOCK_EXCL;
|
*iolock = XFS_IOLOCK_EXCL;
|
||||||
else
|
else
|
||||||
*iolock = XFS_IOLOCK_SHARED;
|
*iolock = XFS_IOLOCK_SHARED;
|
||||||
|
@ -737,8 +750,13 @@ xfs_file_dio_aio_write(
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (*iolock == XFS_IOLOCK_EXCL) {
|
/*
|
||||||
/* demote the lock now the cached pages are gone */
|
* If we are doing unaligned IO, wait for all other IO to drain,
|
||||||
|
* otherwise demote the lock if we had to flush cached pages
|
||||||
|
*/
|
||||||
|
if (unaligned_io)
|
||||||
|
xfs_ioend_wait(ip);
|
||||||
|
else if (*iolock == XFS_IOLOCK_EXCL) {
|
||||||
xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
|
xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
|
||||||
*iolock = XFS_IOLOCK_SHARED;
|
*iolock = XFS_IOLOCK_SHARED;
|
||||||
}
|
}
|
||||||
|
|
Загрузка…
Ссылка в новой задаче