btrfs: handle errors properly in update_inline_extent_backref()

[PROBLEM]
Inside function update_inline_extent_backref(), we have several
BUG_ON()s along with some ASSERT()s which can be triggered by a
corrupted filesystem.

[ANALYSIS]
Most of those BUG_ON()s and ASSERT()s are just a way of handling
unexpected on-disk data.

Although we have tree-checker to rule out obviously incorrect extent
tree blocks, it is not enough to catch these cases.  Thus we need proper
error handling for them.

[FIX]
Thankfully all the callers of update_inline_extent_backref() eventually
handle the error by aborting the current transaction (see the
illustrative sketch after the list below).
So this patch does proper error handling by:

- Make update_inline_extent_backref() return int
  The return value would be either 0 or -EUCLEAN.

- Replace BUG_ON()s and ASSERT()s with proper error handling
  This includes:
  * Dump the bad extent tree leaf
  * Output an error message for the cause
    This would include the extent bytenr, num_bytes (if needed), the bad
    values and expected good values.
  * Return -EUCLEAN

  Note here we remove all the WARN_ON()s, as the transaction will
  eventually be aborted anyway, which triggers a backtrace.

- Better comments on why we expect refs == 1 and refs_to_mod == -1 for
  tree blocks
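
As a rough illustration only (not part of this patch): a hypothetical
call site would consume the new return value as sketched below.  Only
update_inline_extent_backref() and btrfs_abort_transaction() are real;
the wrapper function is made up for the example.

        /*
         * Hypothetical sketch of a call site.  The real callers are
         * insert_inline_extent_backref() and remove_extent_backref(),
         * whose callers abort the transaction on error.
         */
        static int example_drop_inline_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_path *path,
                                           struct btrfs_extent_inline_ref *iref,
                                           int refs_to_drop)
        {
                int ret;

                /* Now returns 0 or -EUCLEAN instead of hitting a BUG_ON(). */
                ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
                if (ret < 0) {
                        /*
                         * The bad leaf and the reason have already been
                         * dumped; aborting the transaction also gives us
                         * the backtrace the removed WARN_ON()s provided.
                         */
                        btrfs_abort_transaction(trans, ret);
                        return ret;
                }
                return 0;
        }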

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Qu Wenruo 2023-08-09 15:08:21 +08:00, committed by David Sterba
Parent: 5b135b382a
Commit: 257614301a
1 changed file with 62 additions and 13 deletions

fs/btrfs/extent-tree.c

@@ -381,11 +381,11 @@ int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
                 }
         }
 
+        WARN_ON(1);
         btrfs_print_leaf(eb);
         btrfs_err(eb->fs_info,
                   "eb %llu iref 0x%lx invalid extent inline ref type %d",
                   eb->start, (unsigned long)iref, type);
-        WARN_ON(1);
 
         return BTRFS_REF_TYPE_INVALID;
 }
@@ -1058,13 +1058,13 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
 /*
  * helper to update/remove inline back ref
  */
-static noinline_for_stack
-void update_inline_extent_backref(struct btrfs_path *path,
+static noinline_for_stack int update_inline_extent_backref(struct btrfs_path *path,
                                   struct btrfs_extent_inline_ref *iref,
                                   int refs_to_mod,
                                   struct btrfs_delayed_extent_op *extent_op)
 {
         struct extent_buffer *leaf = path->nodes[0];
+        struct btrfs_fs_info *fs_info = leaf->fs_info;
         struct btrfs_extent_item *ei;
         struct btrfs_extent_data_ref *dref = NULL;
         struct btrfs_shared_data_ref *sref = NULL;
@@ -1077,18 +1077,33 @@ void update_inline_extent_backref(struct btrfs_path *path,
 
         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
         refs = btrfs_extent_refs(leaf, ei);
-        WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
+        if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
+                struct btrfs_key key;
+                u32 extent_size;
+
+                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+                if (key.type == BTRFS_METADATA_ITEM_KEY)
+                        extent_size = fs_info->nodesize;
+                else
+                        extent_size = key.offset;
+                btrfs_print_leaf(leaf);
+                btrfs_err(fs_info,
+        "invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
+                          key.objectid, extent_size, refs_to_mod, refs);
+                return -EUCLEAN;
+        }
         refs += refs_to_mod;
         btrfs_set_extent_refs(leaf, ei, refs);
         if (extent_op)
                 __run_delayed_extent_op(extent_op, leaf, ei);
 
-        /*
-         * If type is invalid, we should have bailed out after
-         * lookup_inline_extent_backref().
-         */
         type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
-        ASSERT(type != BTRFS_REF_TYPE_INVALID);
+        /*
+         * Function btrfs_get_extent_inline_ref_type() has already printed
+         * error messages.
+         */
+        if (unlikely(type == BTRFS_REF_TYPE_INVALID))
+                return -EUCLEAN;
 
         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
@@ -1098,10 +1113,43 @@ void update_inline_extent_backref(struct btrfs_path *path,
                 refs = btrfs_shared_data_ref_count(leaf, sref);
         } else {
                 refs = 1;
-                BUG_ON(refs_to_mod != -1);
+                /*
+                 * For tree blocks we can only drop one ref for it, and tree
+                 * blocks should not have refs > 1.
+                 *
+                 * Furthermore if we're inserting a new inline backref, we
+                 * won't reach this path either. That would be
+                 * setup_inline_extent_backref().
+                 */
+                if (unlikely(refs_to_mod != -1)) {
+                        struct btrfs_key key;
+
+                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+                        btrfs_print_leaf(leaf);
+                        btrfs_err(fs_info,
+                "invalid refs_to_mod for tree block %llu, has %d expect -1",
+                                  key.objectid, refs_to_mod);
+                        return -EUCLEAN;
+                }
         }
 
-        BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
+        if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
+                struct btrfs_key key;
+                u32 extent_size;
+
+                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+                if (key.type == BTRFS_METADATA_ITEM_KEY)
+                        extent_size = fs_info->nodesize;
+                else
+                        extent_size = key.offset;
+                btrfs_print_leaf(leaf);
+                btrfs_err(fs_info,
+"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
+                          (unsigned long)iref, key.objectid, extent_size,
+                          refs_to_mod, refs);
+                return -EUCLEAN;
+        }
         refs += refs_to_mod;
 
         if (refs > 0) {
@@ -1121,6 +1169,7 @@ void update_inline_extent_backref(struct btrfs_path *path,
                 btrfs_truncate_item(path, item_size, 1);
         }
         btrfs_mark_buffer_dirty(leaf);
+        return 0;
 }
 
 static noinline_for_stack
@@ -1149,7 +1198,7 @@ int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
                                    bytenr, num_bytes, root_objectid, path->slots[0]);
                         return -EUCLEAN;
                 }
-                update_inline_extent_backref(path, iref, refs_to_add, extent_op);
+                ret = update_inline_extent_backref(path, iref, refs_to_add, extent_op);
         } else if (ret == -ENOENT) {
                 setup_inline_extent_backref(trans->fs_info, path, iref, parent,
                                             root_objectid, owner, offset,
@@ -1169,7 +1218,7 @@ static int remove_extent_backref(struct btrfs_trans_handle *trans,
 
         BUG_ON(!is_data && refs_to_drop != 1);
         if (iref)
-                update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
+                ret = update_inline_extent_backref(path, iref, -refs_to_drop, NULL);
         else if (is_data)
                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
         else