btrfs: Use btrfs_mark_bg_unused to replace open code
Introduce a small helper, btrfs_mark_bg_unused(), to acquire the lock and add a block group to the unused_bgs list. No functional change; only three callers are involved.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent: 2556fbb0be
Commit: 031f24da2c
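All three call sites open-code the same idiom that the helper centralizes: take unused_bgs_lock, and only if the block group is not already queued, pin it with a reference and append it to the unused_bgs list for the cleaner thread to process later. The snippet below is a minimal userspace sketch of that "take a reference before publishing, skip if already queued" idiom; the bg/fs_state structs, the mark_bg_unused() name, and the pthread mutex are hypothetical stand-ins for the kernel types, not the kernel implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures (illustration only). */
struct bg {
        int refs;         /* simplified reference count */
        bool queued;      /* stands in for !list_empty(&bg->bg_list) */
        struct bg *next;  /* simple singly linked "unused" list */
};

struct fs_state {
        pthread_mutex_t unused_lock;  /* stands in for unused_bgs_lock */
        struct bg *unused_head;       /* stands in for the unused_bgs list */
};

/*
 * Same idiom as the helper: under the list lock, queue the block group for
 * later cleanup only if it is not already queued, and take a reference so it
 * cannot go away while it sits on the list.
 */
static void mark_bg_unused(struct fs_state *fs, struct bg *bg)
{
        pthread_mutex_lock(&fs->unused_lock);
        if (!bg->queued) {
                bg->refs++;                  /* reference now held by the list */
                bg->queued = true;
                bg->next = fs->unused_head;  /* head insert here; the kernel uses list_add_tail */
                fs->unused_head = bg;
        }
        pthread_mutex_unlock(&fs->unused_lock);
}

int main(void)
{
        struct fs_state fs = { .unused_lock = PTHREAD_MUTEX_INITIALIZER, .unused_head = NULL };
        struct bg bg = { .refs = 1, .queued = false, .next = NULL };

        mark_bg_unused(&fs, &bg);
        mark_bg_unused(&fs, &bg);  /* second call is a no-op: already queued */
        printf("refs=%d queued=%d\n", bg.refs, (int)bg.queued);  /* refs=2 queued=1 */
        return 0;
}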
fs/btrfs/ctree.h

@@ -2801,6 +2801,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
 void check_system_chunk(struct btrfs_trans_handle *trans, const u64 type);
 u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                        u64 start, u64 end);
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
 
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
fs/btrfs/extent-tree.c

@@ -6166,16 +6166,8 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                 * dirty list to avoid races between cleaner kthread and space
                 * cache writeout.
                 */
-               if (!alloc && old_val == 0) {
-                       spin_lock(&info->unused_bgs_lock);
-                       if (list_empty(&cache->bg_list)) {
-                               btrfs_get_block_group(cache);
-                               trace_btrfs_add_unused_block_group(cache);
-                               list_add_tail(&cache->bg_list,
-                                             &info->unused_bgs);
-                       }
-                       spin_unlock(&info->unused_bgs_lock);
-               }
+               if (!alloc && old_val == 0)
+                       btrfs_mark_bg_unused(cache);
 
                btrfs_put_block_group(cache);
                total -= num_bytes;
@@ -9987,15 +9979,8 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
                if (btrfs_chunk_readonly(info, cache->key.objectid)) {
                        inc_block_group_ro(cache, 1);
                } else if (btrfs_block_group_used(&cache->item) == 0) {
-                       spin_lock(&info->unused_bgs_lock);
-                       /* Should always be true but just in case. */
-                       if (list_empty(&cache->bg_list)) {
-                               btrfs_get_block_group(cache);
-                               trace_btrfs_add_unused_block_group(cache);
-                               list_add_tail(&cache->bg_list,
-                                             &info->unused_bgs);
-                       }
-                       spin_unlock(&info->unused_bgs_lock);
+                       ASSERT(list_empty(&cache->bg_list));
+                       btrfs_mark_bg_unused(cache);
                }
        }
 
@@ -10914,3 +10899,16 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
                               !atomic_read(&root->will_be_snapshotted));
        }
 }
+
+void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg)
+{
+       struct btrfs_fs_info *fs_info = bg->fs_info;
+
+       spin_lock(&fs_info->unused_bgs_lock);
+       if (list_empty(&bg->bg_list)) {
+               btrfs_get_block_group(bg);
+               trace_btrfs_add_unused_block_group(bg);
+               list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
+       }
+       spin_unlock(&fs_info->unused_bgs_lock);
+}
fs/btrfs/scrub.c

@@ -3951,14 +3951,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                if (!cache->removed && !cache->ro && cache->reserved == 0 &&
                    btrfs_block_group_used(&cache->item) == 0) {
                        spin_unlock(&cache->lock);
-                       spin_lock(&fs_info->unused_bgs_lock);
-                       if (list_empty(&cache->bg_list)) {
-                               btrfs_get_block_group(cache);
-                               trace_btrfs_add_unused_block_group(cache);
-                               list_add_tail(&cache->bg_list,
-                                             &fs_info->unused_bgs);
-                       }
-                       spin_unlock(&fs_info->unused_bgs_lock);
+                       btrfs_mark_bg_unused(cache);
                } else {
                        spin_unlock(&cache->lock);
                }