btrfs: update last_byte_to_unpin in switch_commit_roots

While writing an explanation for the need of the commit_root_sem for
btrfs_prepare_extent_commit, I realized we have a slight hole that could
result in leaked space if we have to do the old style caching.  Consider
the following scenario

 commit root
 +----+----+----+----+----+----+----+
 |\\\\|    |\\\\|\\\\|    |\\\\|\\\\|
 +----+----+----+----+----+----+----+
 0    1    2    3    4    5    6    7

 new commit root
 +----+----+----+----+----+----+----+
 |    |    |    |\\\\|    |    |\\\\|
 +----+----+----+----+----+----+----+
 0    1    2    3    4    5    6    7

Prior to this patch, we run btrfs_prepare_extent_commit(), which
updates the last_byte_to_unpin, and then we run switch_commit_roots().
In this example let's assume that caching_ctl->progress == 1 at
btrfs_prepare_extent_commit() time, which means that
cache->last_byte_to_unpin == 1.  Then we go and do
switch_commit_roots(), but in the meantime the caching thread has made
some more progress, because we dropped the commit_root_sem and
re-acquired it.  Now caching_ctl->progress == 3.  We swap out the
commit root and carry on to unpin.

The race can happen like this:

  1) The caching thread was running using the old commit root when it
     found the extent for [2, 3);

  2) Then it released the commit_root_sem because it was in the last
     item of a leaf and the semaphore was contended, and set ->progress
     to 3 (value of 'last'), as the last extent item in the current leaf
     was for the extent for range [2, 3);

  3) Next time it gets the commit_root_sem, it will start using the new
     commit root and search for a key with offset 3, so it never finds
     the hole for [2, 3).

  So the caching thread never saw [2, 3) as free space in any of the
  commit roots, and by the time finish_extent_commit() was called for
  the range [0, 3), ->last_byte_to_unpin was 1, so it only returned the
  subrange [0, 1) to the free space cache, skipping [2, 3).
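
To make the interleaving concrete, here is a tiny single-threaded
userspace model of those three steps (the bitmaps, scan_root() and
main() below are made up for illustration; this is not kernel code):

  #include <stdio.h>

  /* 1 = extent item present, 0 = free, matching the diagrams above. */
  static const int old_root[7] = { 1, 0, 1, 1, 0, 1, 1 };
  static const int new_root[7] = { 0, 0, 0, 1, 0, 0, 1 };

  /* Plays the caching thread: report every free unit in [start, end). */
  static void scan_root(const int *root, int start, int end, const char *name)
  {
      for (int i = start; i < end; i++)
          if (!root[i])
              printf("saw [%d,%d) as free in %s\n", i, i + 1, name);
  }

  int main(void)
  {
      /* Steps 1 and 2: scan the old root, stop after [2,3), progress = 3. */
      scan_root(old_root, 0, 3, "the old commit root");
      /* Step 3: resume from progress == 3, but against the new root. */
      scan_root(new_root, 3, 7, "the new commit root");
      return 0;
  }

Running it reports only [1,2), [4,5) and [5,6) as free; [2,3) is never
seen by either scan, which is exactly the gap described above.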

In the unpin code we have last_byte_to_unpin == 1, so we unpin [0,1)
but do not unpin [2,3).  However, because caching_ctl->progress == 3,
we do not see the newly freed section of [2,3), and thus do not add it
to our free space cache.  This results in us missing a chunk of free
space in memory (on disk too, unless we have a power failure before
writing the free space cache to disk).
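
The per-range decision in the unpin path boils down to the following
clamp (a simplified standalone sketch, not the kernel implementation;
the helper name is invented):

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Only the part of a pinned range that lies below last_byte_to_unpin
   * is handed back to the in-memory free space cache; the rest is
   * skipped.
   */
  static uint64_t bytes_returned_to_cache(uint64_t start, uint64_t end,
                                          uint64_t last_byte_to_unpin)
  {
      if (start >= last_byte_to_unpin)
          return 0;
      if (end > last_byte_to_unpin)
          end = last_byte_to_unpin;
      return end - start;
  }

  int main(void)
  {
      /* The range [0,3) from the example, with last_byte_to_unpin == 1. */
      printf("%llu\n", (unsigned long long)bytes_returned_to_cache(0, 3, 1));
      return 0;
  }

With the numbers above this yields 1, i.e. only [0,1) makes it back
into the free space cache.  With the fix, ->last_byte_to_unpin is
sampled at commit root swap time (3 in this example), so the pinned
space below 3, including [2,3), is returned as well.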

Fix this by making sure the ->last_byte_to_unpin is set at the same
time that we swap the commit roots; this ensures that we will always be
consistent.

CC: stable@vger.kernel.org # 5.8+
Reviewed-by: Filipe Manana <fdmanana@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
[ update changelog with Filipe's review comments ]
Signed-off-by: David Sterba <dsterba@suse.com>
Josef Bacik 2020-10-23 09:58:05 -04:00, committed by David Sterba
Parent: 9076dbd5ee
Commit: 27d56e62e4
3 changed files: 40 additions and 28 deletions

@@ -2610,7 +2610,6 @@ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
                                u64 start, u64 len, int delalloc);
 int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, u64 start,
                               u64 len);
-void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info);
 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans);
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                          struct btrfs_ref *generic_ref);

@@ -2708,31 +2708,6 @@ btrfs_inc_block_group_reservations(struct btrfs_block_group *bg)
         atomic_inc(&bg->reservations);
 }
 
-void btrfs_prepare_extent_commit(struct btrfs_fs_info *fs_info)
-{
-        struct btrfs_caching_control *next;
-        struct btrfs_caching_control *caching_ctl;
-        struct btrfs_block_group *cache;
-
-        down_write(&fs_info->commit_root_sem);
-
-        list_for_each_entry_safe(caching_ctl, next,
-                                 &fs_info->caching_block_groups, list) {
-                cache = caching_ctl->block_group;
-                if (btrfs_block_group_done(cache)) {
-                        cache->last_byte_to_unpin = (u64)-1;
-                        list_del_init(&caching_ctl->list);
-                        btrfs_put_caching_control(caching_ctl);
-                } else {
-                        cache->last_byte_to_unpin = caching_ctl->progress;
-                }
-        }
-
-        up_write(&fs_info->commit_root_sem);
-
-        btrfs_update_global_block_rsv(fs_info);
-}
-
 /*
  * Returns the free cluster for the given space info and sets empty_cluster to
  * what it should be based on the mount options.

@@ -155,6 +155,7 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
         struct btrfs_transaction *cur_trans = trans->transaction;
         struct btrfs_fs_info *fs_info = trans->fs_info;
         struct btrfs_root *root, *tmp;
+        struct btrfs_caching_control *caching_ctl, *next;
 
         down_write(&fs_info->commit_root_sem);
         list_for_each_entry_safe(root, tmp, &cur_trans->switch_commits,
@@ -180,6 +181,45 @@ static noinline void switch_commit_roots(struct btrfs_trans_handle *trans)
                         spin_lock(&cur_trans->dropped_roots_lock);
                 }
                 spin_unlock(&cur_trans->dropped_roots_lock);
 
+        /*
+         * We have to update the last_byte_to_unpin under the commit_root_sem,
+         * at the same time we swap out the commit roots.
+         *
+         * This is because we must have a real view of the last spot the caching
+         * kthreads were while caching.  Consider the following views of the
+         * extent tree for a block group
+         *
+         * commit root
+         * +----+----+----+----+----+----+----+
+         * |\\\\|    |\\\\|\\\\|    |\\\\|\\\\|
+         * +----+----+----+----+----+----+----+
+         * 0    1    2    3    4    5    6    7
+         *
+         * new commit root
+         * +----+----+----+----+----+----+----+
+         * |    |    |    |\\\\|    |    |\\\\|
+         * +----+----+----+----+----+----+----+
+         * 0    1    2    3    4    5    6    7
+         *
+         * If the caching_ctl->progress was at 3, then we are only allowed to
+         * unpin [0,1) and [2,3), because the caching thread has already
+         * processed those extents.  We are not allowed to unpin [5,6), because
+         * the caching thread will re-start its search from 3, and thus find
+         * the hole from [4,6) to add to the free space cache.
+         */
+        list_for_each_entry_safe(caching_ctl, next,
+                                 &fs_info->caching_block_groups, list) {
+                struct btrfs_block_group *cache = caching_ctl->block_group;
+
+                if (btrfs_block_group_done(cache)) {
+                        cache->last_byte_to_unpin = (u64)-1;
+                        list_del_init(&caching_ctl->list);
+                        btrfs_put_caching_control(caching_ctl);
+                } else {
+                        cache->last_byte_to_unpin = caching_ctl->progress;
+                }
+        }
+
         up_write(&fs_info->commit_root_sem);
 }
@@ -2254,8 +2294,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
                 goto unlock_tree_log;
         }
 
-        btrfs_prepare_extent_commit(fs_info);
-
         cur_trans = fs_info->running_transaction;
 
         btrfs_set_root_node(&fs_info->tree_root->root_item,