Btrfs: inline csums if we're fsyncing
The tree logging code needs the csums to be on the ordered extents in order to log them properly, so mark that we're sync and inline the csum creation so we don't have to wait on the csumming to be done when logging extents that are still in flight. Thanks,

Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
Parent: a95249b392
Commit: b812ce2879
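The change threads one idea through three files: a per-inode counter of in-flight O_SYNC writers and fsyncers, bumped in file.c around sync writes and the fsync flush, and read in inode.c when data bios are submitted, where a nonzero count forces the checksums to be computed inline instead of being handed to the async workers. Below is a minimal sketch of that contract, using hypothetical helper names (the patch itself open-codes the atomic operations); the read side is sketched just before the inode.c hunks further down.

/* Sketch of the sync_writers contract, not the literal patch; these
 * helper names are hypothetical and do not exist in the kernel. */
static inline void start_sync_write(struct btrfs_inode *bi)
{
	atomic_inc(&bi->sync_writers);	/* data bios from here on csum inline */
}

static inline void end_sync_write(struct btrfs_inode *bi)
{
	atomic_dec(&bi->sync_writers);	/* last writer out re-enables async csums */
}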
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -91,6 +91,9 @@ struct btrfs_inode {
 
 	unsigned long runtime_flags;
 
+	/* Keep track of who's O_SYNC/fsycing currently */
+	atomic_t sync_writers;
+
 	/* full 64 bit generation number, struct vfs_inode doesn't have a big
 	 * enough field for this.
 	 */
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1472,6 +1472,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 	ssize_t num_written = 0;
 	ssize_t err = 0;
 	size_t count, ocount;
+	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
 
 	sb_start_write(inode->i_sb);
 
@@ -1529,6 +1530,9 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 		}
 	}
 
+	if (sync)
+		atomic_inc(&BTRFS_I(inode)->sync_writers);
+
 	if (unlikely(file->f_flags & O_DIRECT)) {
 		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
 						   pos, ppos, count, ocount);
@@ -1563,6 +1567,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 		num_written = err;
 	}
 out:
+	if (sync)
+		atomic_dec(&BTRFS_I(inode)->sync_writers);
 	sb_end_write(inode->i_sb);
 	current->backing_dev_info = NULL;
 	return num_written ? num_written : err;
@@ -1613,7 +1619,9 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	 * out of the ->i_mutex. If so, we can flush the dirty pages by
 	 * multi-task, and make the performance up.
 	 */
+	atomic_inc(&BTRFS_I(inode)->sync_writers);
 	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+	atomic_dec(&BTRFS_I(inode)->sync_writers);
 	if (ret)
 		return ret;
 
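The fs/btrfs/inode.c hunks that follow consume the counter at bio-submission time. Taken together they turn the checksum handling for a data write bio into a three-way choice; here is a simplified sketch of the effective outcome, using a hypothetical enum and helper that the real code does not have (and ignoring the relocation-tree special case).

/* Sketch only: summarizes the branch outcomes in btrfs_submit_bio_hook()
 * after this change; the kernel open-codes this inside the submit path. */
enum csum_path {
	CSUM_NONE,	/* BTRFS_INODE_NODATASUM: map the bio directly */
	CSUM_ASYNC,	/* hand off to btrfs_wq_submit_bio(), csummed later */
	CSUM_INLINE,	/* btrfs_csum_one_bio() now, before mapping */
};

static enum csum_path choose_csum_path(struct inode *inode, int skip_sum)
{
	if (skip_sum)
		return CSUM_NONE;
	if (atomic_read(&BTRFS_I(inode)->sync_writers))
		return CSUM_INLINE;	/* an fsync/O_SYNC writer is in flight */
	return CSUM_ASYNC;
}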
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1622,6 +1622,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 	int ret = 0;
 	int skip_sum;
 	int metadata = 0;
+	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);
 
 	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
@@ -1644,7 +1645,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 			goto out;
 		}
 		goto mapit;
-	} else if (!skip_sum) {
+	} else if (async && !skip_sum) {
 		/* csum items have already been cloned */
 		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
 			goto mapit;
@@ -1655,6 +1656,10 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 				   __btrfs_submit_bio_start,
 				   __btrfs_submit_bio_done);
 		goto out;
+	} else if (!skip_sum) {
+		ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
+		if (ret)
+			goto out;
 	}
 
 mapit:
@@ -6333,6 +6338,9 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	int ret;
 
+	if (async_submit)
+		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);
+
 	bio_get(bio);
 
 	if (!write) {
@@ -7113,6 +7121,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
 	ei->io_tree.track_uptodate = 1;
 	ei->io_failure_tree.track_uptodate = 1;
+	atomic_set(&ei->sync_writers, 0);
 	mutex_init(&ei->log_mutex);
 	mutex_init(&ei->delalloc_mutex);
 	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
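For completeness, a small user-space sketch that exercises both bracketed paths on a btrfs mount: an O_SYNC write keeps sync_writers elevated for the duration of the write, and a buffered write followed by fsync bumps it around the flush in btrfs_sync_file(). The mount point, file names, and buffer size are arbitrary assumptions, not part of the patch.

/* Hedged example only: exercises the inline-csum paths added above.
 * Assumes /mnt/btrfs is a mounted btrfs filesystem. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	memset(buf, 0xab, sizeof(buf));

	/* O_SYNC write: write() returns only once the data (and its csums,
	 * now generated inline) are on disk. */
	int fd = open("/mnt/btrfs/sync-test", O_CREAT | O_WRONLY | O_SYNC, 0644);
	if (fd < 0) {
		perror("open O_SYNC");
		return 1;
	}
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		perror("write O_SYNC");
	close(fd);

	/* Buffered write + fsync: the flush inside fsync runs with
	 * sync_writers elevated, so its bios also csum inline. */
	fd = open("/mnt/btrfs/fsync-test", O_CREAT | O_WRONLY, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		perror("write");
	if (fsync(fd))
		perror("fsync");
	close(fd);
	return 0;
}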