/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#ifndef BTRFS_BACKREF_H
#define BTRFS_BACKREF_H

#include <linux/btrfs.h>
#include "ulist.h"
#include "disk-io.h"
#include "extent_io.h"

struct inode_fs_paths {
        struct btrfs_path *btrfs_path;
        struct btrfs_root *fs_root;
        struct btrfs_data_container *fspath;
};

typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
                void *ctx);

int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
                        struct btrfs_path *path, struct btrfs_key *found_key,
                        u64 *flags);

int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
                            struct btrfs_key *key, struct btrfs_extent_item *ei,
                            u32 item_size, u64 *out_root, u8 *out_level);

int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
                          u64 extent_item_objectid,
                          u64 extent_offset, int search_commit_root,
                          iterate_extent_inodes_t *iterate, void *ctx,
                          bool ignore_offset);

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
                                struct btrfs_path *path, void *ctx,
                                bool ignore_offset);

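/*
 * Illustrative sketch of the callback based iteration above: the walker
 * invokes an iterate_extent_inodes_t callback once per (inode, offset, root)
 * reference it resolves.  This is not a verbatim excerpt from the kernel;
 * the callback and context names are made up, error handling is trimmed, and
 * a non-zero callback return is assumed to stop the walk.
 *
 *      static int count_extent_refs(u64 inum, u64 offset, u64 root, void *ctx)
 *      {
 *              u64 *nr_refs = ctx;
 *
 *              (*nr_refs)++;
 *              return 0;
 *      }
 *
 *      u64 nr_refs = 0;
 *      int ret;
 *
 *      ret = iterate_extent_inodes(fs_info, extent_item_objectid,
 *                                  extent_offset, 1, count_extent_refs,
 *                                  &nr_refs, false);
 *
 * iterate_inodes_from_logical() first resolves a logical address to the
 * extent item and then performs the same kind of walk.
 */
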
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);

int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 time_seq, struct ulist **leafs,
                         const u64 *extent_item_pos, bool ignore_offset);
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
                         struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 time_seq, struct ulist **roots,
                         bool skip_commit_root_sem);

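/*
 * Sketch of how the ulist result of btrfs_find_all_roots() is typically
 * consumed, using the iterator helpers from "ulist.h".  Illustrative only:
 * error handling is omitted and the surrounding variables are assumed to
 * exist in the caller.
 *
 *      struct ulist *roots = NULL;
 *      struct ulist_iterator uiter;
 *      struct ulist_node *unode;
 *
 *      ret = btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, &roots,
 *                                 false);
 *      ULIST_ITER_INIT(&uiter);
 *      while ((unode = ulist_next(roots, &uiter))) {
 *              (unode->val holds the objectid of one referencing root)
 *      }
 *      ulist_free(roots);
 */
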
char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                        u32 name_len, unsigned long name_off,
                        struct extent_buffer *eb_in, u64 parent,
                        char *dest, u32 size);

struct btrfs_data_container *init_data_container(u32 total_bytes);
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
                                  struct btrfs_path *path);
void free_ipath(struct inode_fs_paths *ipath);

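/*
 * Minimal sketch of the ipath helpers above.  Assumption: init_ipath() sizes
 * the embedded btrfs_data_container itself, so no separate
 * init_data_container() call is shown; error handling is trimmed and the
 * 4096 byte buffer size is arbitrary.
 *
 *      struct inode_fs_paths *ipath;
 *      int ret;
 *
 *      ipath = init_ipath(4096, fs_root, path);
 *      if (IS_ERR(ipath))
 *              return PTR_ERR(ipath);
 *      ret = paths_from_inode(inum, ipath);
 *      (on success, ipath->fspath->elem_cnt path strings are available via
 *       ipath->fspath->val[])
 *      free_ipath(ipath);
 */
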
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
                          u64 start_off, struct btrfs_path *path,
                          struct btrfs_inode_extref **ret_extref,
                          u64 *found_off);
int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
                       struct ulist *roots, struct ulist *tmp_ulist);

int __init btrfs_prelim_ref_init(void);
void __cold btrfs_prelim_ref_exit(void);

struct prelim_ref {
        struct rb_node rbnode;
        u64 root_id;
        struct btrfs_key key_for_search;
        int level;
        int count;
        struct extent_inode_elem *inode_list;
        u64 parent;
        u64 wanted_disk_byte;
};

/*
 * Iterate backrefs of one extent.
 *
 * For now it only supports iterating tree blocks in the commit root.
 */
struct btrfs_backref_iter {
        u64 bytenr;
        struct btrfs_path *path;
        struct btrfs_fs_info *fs_info;
        struct btrfs_key cur_key;
        u32 item_ptr;
        u32 cur_ptr;
        u32 end_ptr;
};

struct btrfs_backref_iter *btrfs_backref_iter_alloc(
                struct btrfs_fs_info *fs_info, gfp_t gfp_flag);

static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)
{
        if (!iter)
                return;
        btrfs_free_path(iter->path);
        kfree(iter);
}

static inline struct extent_buffer *btrfs_backref_get_eb(
                struct btrfs_backref_iter *iter)
{
        if (!iter)
                return NULL;
        return iter->path->nodes[0];
}

/*
 * For metadata keyed with EXTENT_ITEM (the non-skinny case), the first inline
 * item is a btrfs_tree_block_info, which is not preceded by a
 * btrfs_extent_inline_ref header.
 *
 * This helper detects that case.
 */
static inline bool btrfs_backref_has_tree_block_info(
                struct btrfs_backref_iter *iter)
{
        if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&
            iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item))
                return true;
        return false;
}

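/*
 * For reference, the inline data of such a non-skinny metadata extent item is
 * laid out roughly as below (a sketch implied by the helper above, with
 * item_ptr/cur_ptr/end_ptr being byte offsets inside the leaf):
 *
 *      item_ptr -> struct btrfs_extent_item
 *                  struct btrfs_tree_block_info   (EXTENT_ITEM_KEY metadata only)
 *                  inline extent backrefs ...
 *      end_ptr  -> first byte past the item
 *
 * So when cur_ptr - item_ptr == sizeof(struct btrfs_extent_item), the cursor
 * sits on the tree block info rather than on an inline ref.
 */
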
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr);
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter);

static inline bool btrfs_backref_iter_is_inline_ref(
                struct btrfs_backref_iter *iter)
{
        if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
            iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
                return true;
        return false;
}

static inline void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
{
        iter->bytenr = 0;
        iter->item_ptr = 0;
        iter->cur_ptr = 0;
        iter->end_ptr = 0;
        btrfs_release_path(iter->path);
        memset(&iter->cur_key, 0, sizeof(iter->cur_key));
}

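/*
 * Typical shape of a walk with the iterator above, as an illustrative sketch.
 * It assumes the usual convention that btrfs_backref_iter_start() returns 0
 * on success and <0 on error, and that btrfs_backref_iter_next() returns 0
 * while more backrefs remain, >0 once they are exhausted and <0 on error.
 *
 *      struct btrfs_backref_iter *iter;
 *      int ret;
 *
 *      iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *      if (!iter)
 *              return -ENOMEM;
 *      ret = btrfs_backref_iter_start(iter, bytenr);
 *      while (ret == 0) {
 *              (inspect iter->cur_key; for inline refs the data sits at
 *               offset iter->cur_ptr inside btrfs_backref_get_eb(iter))
 *              ret = btrfs_backref_iter_next(iter);
 *      }
 *      btrfs_backref_iter_release(iter);
 *      btrfs_backref_iter_free(iter);
 */
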
/*
 * Backref cache related structures
 *
 * The whole objective of backref_cache is to build a bi-directional map
 * of tree blocks (represented by backref_node) and all their parents.
 */

/*
 * Represent a tree block in the backref cache
 */
struct btrfs_backref_node {
        struct {
                struct rb_node rb_node;
                u64 bytenr;
        }; /* Use rb_simple_node for search/insert */

        u64 new_bytenr;
        /* Objectid of the tree block owner; may not be up to date */
        u64 owner;
        /* Link to pending, changed or detached list */
        struct list_head list;

        /* List of upper level edges, which link this node to its parents */
        struct list_head upper;
        /* List of lower level edges, which link this node to its children */
        struct list_head lower;

        /* NULL if this node is not a tree root */
        struct btrfs_root *root;
        /* Extent buffer got by COWing the block */
        struct extent_buffer *eb;
        /* Level of the tree block */
        unsigned int level:8;
        /* Is the block in a non-shareable tree */
        unsigned int cowonly:1;
        /* 1 if no child node is in the cache */
        unsigned int lowest:1;
        /* Is the extent buffer locked */
        unsigned int locked:1;
        /* Has the block been processed */
        unsigned int processed:1;
        /* Have backrefs of this block been checked */
        unsigned int checked:1;
        /*
         * 1 if corresponding block has been COWed but some upper level block
         * pointers may not point to the new location
         */
        unsigned int pending:1;
        /* 1 if the backref node isn't connected to any other backref node */
        unsigned int detached:1;

        /*
         * For the generic-purpose backref cache we only care whether the root
         * is a reloc root, not which source subvolume id it came from.
         */
        unsigned int is_reloc_root:1;
};

#define LOWER   0
#define UPPER   1

/*
 * Represent an edge connecting upper and lower backref nodes.
 */
struct btrfs_backref_edge {
        /*
         * list[LOWER] is linked to btrfs_backref_node::upper of the lower
         * level node, and list[UPPER] is linked to btrfs_backref_node::lower
         * of the upper level node.
         *
         * Also, build_backref_tree() uses list[UPPER] for pending edges,
         * before linking list[UPPER] to its upper level nodes.
         */
        struct list_head list[2];

        /* Two related nodes */
        struct btrfs_backref_node *node[2];
};

struct btrfs_backref_cache {
        /* Red black tree of all backref nodes in the cache */
        struct rb_root rb_root;
        /* For passing backref nodes to btrfs_reloc_cow_block */
        struct btrfs_backref_node *path[BTRFS_MAX_LEVEL];
        /*
         * List of blocks that have been COWed but some block pointers in
         * upper level blocks may not reflect the new location
         */
        struct list_head pending[BTRFS_MAX_LEVEL];
        /* List of backref nodes with no child node */
        struct list_head leaves;
        /* List of blocks that have been COWed in current transaction */
        struct list_head changed;
        /* List of detached backref nodes */
        struct list_head detached;

        u64 last_trans;

        int nr_nodes;
        int nr_edges;

        /* List of unchecked backref edges during backref cache build */
        struct list_head pending_edge;

        /* List of useless backref nodes during backref cache build */
        struct list_head useless_node;

        struct btrfs_fs_info *fs_info;

        /*
         * Whether this cache is for relocation
         *
         * The relocation backref cache requires more info for reloc roots
         * than the generic backref cache does.
         */
        unsigned int is_reloc;
};

void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
                              struct btrfs_backref_cache *cache, int is_reloc);
struct btrfs_backref_node *btrfs_backref_alloc_node(
                struct btrfs_backref_cache *cache, u64 bytenr, int level);
struct btrfs_backref_edge *btrfs_backref_alloc_edge(
                struct btrfs_backref_cache *cache);

#define LINK_LOWER      (1 << 0)
#define LINK_UPPER      (1 << 1)
static inline void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
                                           struct btrfs_backref_node *lower,
                                           struct btrfs_backref_node *upper,
                                           int link_which)
{
        ASSERT(upper && lower && upper->level == lower->level + 1);
        edge->node[LOWER] = lower;
        edge->node[UPPER] = upper;
        if (link_which & LINK_LOWER)
                list_add_tail(&edge->list[LOWER], &lower->upper);
        if (link_which & LINK_UPPER)
                list_add_tail(&edge->list[UPPER], &upper->lower);
}

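/*
 * Illustrative pairing of the allocation and linking helpers above (a sketch,
 * not a verbatim excerpt; error handling omitted and the node variables are
 * assumed to exist in the caller):
 *
 *      struct btrfs_backref_edge *edge;
 *
 *      edge = btrfs_backref_alloc_edge(cache);
 *      if (!edge)
 *              return -ENOMEM;
 *      btrfs_backref_link_edge(edge, lower_node, upper_node, LINK_LOWER);
 *
 * Passing only LINK_LOWER leaves list[UPPER] free so the edge can sit on a
 * pending list until its upper node is fully set up; LINK_UPPER (or both
 * flags) completes the bi-directional link.
 */
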
static inline void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
                                           struct btrfs_backref_node *node)
{
        if (node) {
                ASSERT(list_empty(&node->list));
                ASSERT(list_empty(&node->lower));
                ASSERT(node->eb == NULL);
                cache->nr_nodes--;
                btrfs_put_root(node->root);
                kfree(node);
        }
}

static inline void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
                                           struct btrfs_backref_edge *edge)
{
        if (edge) {
                cache->nr_edges--;
                kfree(edge);
        }
}

static inline void btrfs_backref_unlock_node_buffer(
                struct btrfs_backref_node *node)
{
        if (node->locked) {
                btrfs_tree_unlock(node->eb);
                node->locked = 0;
        }
}

static inline void btrfs_backref_drop_node_buffer(
                struct btrfs_backref_node *node)
{
        if (node->eb) {
                btrfs_backref_unlock_node_buffer(node);
                free_extent_buffer(node->eb);
                node->eb = NULL;
        }
}

/*
 * Drop the backref node from cache without cleaning up its children
 * edges.
 *
 * This can only be called on a node without parent edges.
 * The children edges are still kept as is.
 */
static inline void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
                                           struct btrfs_backref_node *node)
{
        ASSERT(list_empty(&node->upper));

        btrfs_backref_drop_node_buffer(node);
        list_del_init(&node->list);
        list_del_init(&node->lower);
        if (!RB_EMPTY_NODE(&node->rb_node))
                rb_erase(&node->rb_node, &tree->rb_root);
        btrfs_backref_free_node(tree, node);
}

void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
                                struct btrfs_backref_node *node);

void btrfs_backref_release_cache(struct btrfs_backref_cache *cache);

static inline void btrfs_backref_panic(struct btrfs_fs_info *fs_info,
                                       u64 bytenr, int errno)
{
        btrfs_panic(fs_info, errno,
                    "Inconsistency in backref cache found at offset %llu",
                    bytenr);
}

int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
                                struct btrfs_path *path,
                                struct btrfs_backref_iter *iter,
                                struct btrfs_key *node_key,
                                struct btrfs_backref_node *cur);

int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
                                     struct btrfs_backref_node *start);

void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
                                 struct btrfs_backref_node *node);

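/*
 * Rough lifecycle of the backref cache helpers declared above, as a sketch.
 * The real call sequence lives in the relocation code; error handling and the
 * iteration over pending edges are omitted here, and the cache/node variables
 * are assumed to exist in the caller.
 *
 *      btrfs_backref_init_cache(fs_info, cache, is_reloc);
 *
 *      node = btrfs_backref_alloc_node(cache, bytenr, level);
 *      ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
 *      if (ret >= 0)
 *              ret = btrfs_backref_finish_upper_links(cache, node);
 *      if (ret < 0)
 *              btrfs_backref_error_cleanup(cache, node);
 *
 *      btrfs_backref_release_cache(cache);
 */
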
#endif