Btrfs: make sure all dirty blocks are written at commit time
Writing out dirty block groups may allocate new blocks, and thus may add new delayed back refs; btrfs_run_delayed_refs may in turn make more block groups dirty. commit_cowonly_roots does not handle this recursion properly, so some dirty blocks can be left unwritten at commit time. This patch moves btrfs_run_delayed_refs into the loop that writes dirty block groups, and makes the code not break out of the loop until there are no dirty block groups or delayed back refs left.

Signed-off-by: Yan Zheng <zheng.yan@oracle.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Parent: 33c66f430b
Commit: 4a8c9a62d7
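For orientation before the diff: a minimal sketch of the fixed-point loop the patch introduces. This is an illustration, not the kernel code; find_first_dirty_group() and write_group() are hypothetical stand-ins for the real btrfs_lookup_first_block_group()/next_block_group() scan and write_one_cache_group().

/*
 * Sketch of the commit-time fixed-point loop: alternate between running
 * delayed refs and writing dirty block groups until a full pass finds
 * nothing dirty. find_first_dirty_group() and write_group() are
 * hypothetical stand-ins for the real btrfs helpers.
 */
static int write_dirty_groups_sketch(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        u64 last = 0;

        while (1) {
                struct btrfs_block_group_cache *cache;

                if (last == 0) {
                        /* start of a pass: flush all delayed refs;
                         * this may dirty more block groups */
                        int err = btrfs_run_delayed_refs(trans, root,
                                                         (unsigned long)-1);
                        BUG_ON(err);
                }

                /* hypothetical: first dirty group at or after 'last' */
                cache = find_first_dirty_group(root, last);
                if (!cache) {
                        if (last == 0)
                                break;    /* clean full pass: fixed point */
                        last = 0;         /* rescan from the start */
                        continue;
                }

                cache->dirty = 0;
                last = cache->key.objectid + cache->key.offset;

                /* hypothetical: writing may allocate blocks and queue
                 * new delayed refs for the next pass to pick up */
                write_group(trans, root, cache);
        }
        return 0;
}

The key design point is that a pass starting at last == 0 reruns the delayed refs, so the loop only terminates once a full pass finds neither dirty block groups nor pending refs.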
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2387,13 +2387,29 @@ fail:
 
 }
 
+static struct btrfs_block_group_cache *
+next_block_group(struct btrfs_root *root,
+                 struct btrfs_block_group_cache *cache)
+{
+        struct rb_node *node;
+        spin_lock(&root->fs_info->block_group_cache_lock);
+        node = rb_next(&cache->cache_node);
+        btrfs_put_block_group(cache);
+        if (node) {
+                cache = rb_entry(node, struct btrfs_block_group_cache,
+                                 cache_node);
+                atomic_inc(&cache->count);
+        } else
+                cache = NULL;
+        spin_unlock(&root->fs_info->block_group_cache_lock);
+        return cache;
+}
+
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
 {
-        struct btrfs_block_group_cache *cache, *entry;
-        struct rb_node *n;
+        struct btrfs_block_group_cache *cache;
         int err = 0;
-        int werr = 0;
         struct btrfs_path *path;
         u64 last = 0;
 
@@ -2402,39 +2418,35 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                 return -ENOMEM;
 
         while (1) {
-                cache = NULL;
-                spin_lock(&root->fs_info->block_group_cache_lock);
-                for (n = rb_first(&root->fs_info->block_group_cache_tree);
-                     n; n = rb_next(n)) {
-                        entry = rb_entry(n, struct btrfs_block_group_cache,
-                                         cache_node);
-                        if (entry->dirty) {
-                                cache = entry;
-                                break;
-                        }
+                if (last == 0) {
+                        err = btrfs_run_delayed_refs(trans, root,
+                                                     (unsigned long)-1);
+                        BUG_ON(err);
                 }
-                spin_unlock(&root->fs_info->block_group_cache_lock);
 
-                if (!cache)
-                        break;
-
-                cache->dirty = 0;
-                last += cache->key.offset;
-
-                err = write_one_cache_group(trans, root,
-                                            path, cache);
-                /*
-                 * if we fail to write the cache group, we want
-                 * to keep it marked dirty in hopes that a later
-                 * write will work
-                 */
-                if (err) {
-                        werr = err;
+                cache = btrfs_lookup_first_block_group(root->fs_info, last);
+                while (cache) {
+                        if (cache->dirty)
+                                break;
+                        cache = next_block_group(root, cache);
+                }
+                if (!cache) {
+                        if (last == 0)
+                                break;
+                        last = 0;
                         continue;
                 }
+
+                cache->dirty = 0;
+                last = cache->key.objectid + cache->key.offset;
+
+                err = write_one_cache_group(trans, root, path, cache);
+                BUG_ON(err);
+                btrfs_put_block_group(cache);
         }
+
         btrfs_free_path(path);
-        return werr;
+        return 0;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -444,9 +444,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
 
         btrfs_write_dirty_block_groups(trans, root);
 
-        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-        BUG_ON(ret);
-
         while (1) {
                 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                 if (old_root_bytenr == root->node->start)
@@ -457,9 +454,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
                                         &root->root_key,
                                         &root->root_item);
                 BUG_ON(ret);
-                btrfs_write_dirty_block_groups(trans, root);
 
-                ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+                ret = btrfs_write_dirty_block_groups(trans, root);
                 BUG_ON(ret);
         }
         free_extent_buffer(root->commit_root);
@@ -495,9 +491,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                 root = list_entry(next, struct btrfs_root, dirty_list);
 
                 update_cowonly_root(trans, root);
-
-                ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-                BUG_ON(ret);
         }
         return 0;
 }