Btrfs: preftree: use rb_first_cached
rb_first_cached() trades an extra "leftmost" pointer for doing the same job as rb_first(), but in O(1). While resolving indirect refs and missing refs, the code repeatedly looks up the first rb entry inside a while loop, so it is worth using rb_first_cached() there instead. For more details about the optimization see the patch "Btrfs: delayed-refs: use rb_first_cached for href_root".

Tested-by: Holger Hoffstätte <holger@applied-asynchrony.com>
Signed-off-by: Liu Bo <bo.liu@linux.alibaba.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent: 07e1ce096d
Commit: ecf160b424
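For context, below is a minimal sketch of the rb_root_cached pattern that the diff adopts, using the kernel rbtree API from <linux/rbtree.h>. The struct item type and the item_insert()/item_pop_first() helpers are hypothetical stand-ins for illustration only; the real code operates on struct prelim_ref in the btrfs backref code.

#include <linux/rbtree.h>
#include <linux/types.h>

struct item {
	struct rb_node rbnode;
	u64 key;
};

/* Cached root: keeps an extra pointer to the leftmost (smallest) node. */
static struct rb_root_cached tree = RB_ROOT_CACHED;

static void item_insert(struct item *new)
{
	struct rb_node **p = &tree.rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*p) {
		struct item *cur = rb_entry(*p, struct item, rbnode);

		parent = *p;
		if (new->key < cur->key) {
			p = &(*p)->rb_left;
		} else {
			/* Going right at least once means @new is not the leftmost node. */
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&new->rbnode, parent, p);
	/* The extra hint keeps the cached leftmost pointer up to date. */
	rb_insert_color_cached(&new->rbnode, &tree, leftmost);
}

static struct item *item_pop_first(void)
{
	/* O(1): reads the cached leftmost pointer instead of walking down the tree. */
	struct rb_node *node = rb_first_cached(&tree);

	if (!node)
		return NULL;
	rb_erase_cached(node, &tree);
	return rb_entry(node, struct item, rbnode);
}

The "pop the first entry until the tree is empty" loops in resolve_indirect_refs() and add_missing_keys() follow the item_pop_first() shape above, which is where the O(1) first-entry lookup pays off.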
@@ -112,11 +112,11 @@ static int find_extent_in_eb(const struct extent_buffer *eb,
 }
 
 struct preftree {
-	struct rb_root root;
+	struct rb_root_cached root;
 	unsigned int count;
 };
 
-#define PREFTREE_INIT	{ .root = RB_ROOT, .count = 0 }
+#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }
 
 struct preftrees {
 	struct preftree direct;	/* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
@@ -225,14 +225,15 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 			      struct prelim_ref *newref,
 			      struct share_check *sc)
 {
-	struct rb_root *root;
+	struct rb_root_cached *root;
 	struct rb_node **p;
 	struct rb_node *parent = NULL;
 	struct prelim_ref *ref;
 	int result;
+	bool leftmost = true;
 
 	root = &preftree->root;
-	p = &root->rb_node;
+	p = &root->rb_root.rb_node;
 
 	while (*p) {
 		parent = *p;
@@ -242,6 +243,7 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 			p = &(*p)->rb_left;
 		} else if (result > 0) {
 			p = &(*p)->rb_right;
+			leftmost = false;
 		} else {
 			/* Identical refs, merge them and free @newref */
 			struct extent_inode_elem *eie = ref->inode_list;
@@ -272,7 +274,7 @@ static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 	preftree->count++;
 	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
 	rb_link_node(&newref->rbnode, parent, p);
-	rb_insert_color(&newref->rbnode, root);
+	rb_insert_color_cached(&newref->rbnode, root, leftmost);
 }
 
 /*
@@ -283,11 +285,11 @@ static void prelim_release(struct preftree *preftree)
 {
 	struct prelim_ref *ref, *next_ref;
 
-	rbtree_postorder_for_each_entry_safe(ref, next_ref, &preftree->root,
-					     rbnode)
+	rbtree_postorder_for_each_entry_safe(ref, next_ref,
+					     &preftree->root.rb_root, rbnode)
 		free_pref(ref);
 
-	preftree->root = RB_ROOT;
+	preftree->root = RB_ROOT_CACHED;
 	preftree->count = 0;
 }
 
@@ -627,7 +629,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 	 * freeing the entire indirect tree when we're done. In some test
 	 * cases, the tree can grow quite large (~200k objects).
 	 */
-	while ((rnode = rb_first(&preftrees->indirect.root))) {
+	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
 		struct prelim_ref *ref;
 
 		ref = rb_entry(rnode, struct prelim_ref, rbnode);
@@ -637,7 +639,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 			goto out;
 		}
 
-		rb_erase(&ref->rbnode, &preftrees->indirect.root);
+		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
 		preftrees->indirect.count--;
 
 		if (ref->count == 0) {
@@ -717,9 +719,9 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
 	struct preftree *tree = &preftrees->indirect_missing_keys;
 	struct rb_node *node;
 
-	while ((node = rb_first(&tree->root))) {
+	while ((node = rb_first_cached(&tree->root))) {
 		ref = rb_entry(node, struct prelim_ref, rbnode);
-		rb_erase(node, &tree->root);
+		rb_erase_cached(node, &tree->root);
 
 		BUG_ON(ref->parent);	/* should not be a direct ref */
 		BUG_ON(ref->key_for_search.type);
@@ -1229,14 +1231,14 @@ again:
 	if (ret)
 		goto out;
 
-	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root));
+	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
 
 	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
 				    extent_item_pos, total_refs, sc, ignore_offset);
 	if (ret)
 		goto out;
 
-	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root));
+	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
 
 	/*
 	 * This walks the tree of merged and resolved refs. Tree blocks are
@@ -1245,7 +1247,7 @@ again:
 	 *
 	 * We release the entire tree in one go before returning.
 	 */
-	node = rb_first(&preftrees.direct.root);
+	node = rb_first_cached(&preftrees.direct.root);
 	while (node) {
 		ref = rb_entry(node, struct prelim_ref, rbnode);
 		node = rb_next(&ref->rbnode);