btrfs: locking: remove all the blocking helpers
Now that we're using a rw_semaphore we no longer need to indicate if a
lock is blocking or not, nor do we need to flip the entire path from
blocking to spinning. Remove these helpers and all the places they are
called.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent: 2ae0c2d80d
Commit: ac5887c8e0
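Before the file-by-file diff, a small illustration (not part of the commit) of the call pattern being removed. Under the old custom lock, a holder had to flip a spinning lock into "blocking" mode before doing anything that might sleep; now that eb->lock is a rw_semaphore, the lock can simply stay held across sleeping operations, so the mode flip and its helpers disappear. do_sleeping_work() below is a hypothetical stand-in for any operation that may schedule:

	/* Old pattern: a spinning lock could not be held across a sleep, so it
	 * had to be converted to blocking mode first and released with the
	 * matching *_blocking variant.
	 */
	btrfs_tree_read_lock(eb);
	btrfs_set_lock_blocking_read(eb);	/* drop the spinlock, count a blocking reader */
	do_sleeping_work(eb);			/* hypothetical sleeping operation */
	btrfs_tree_read_unlock_blocking(eb);

	/* New pattern: the rw_semaphore may be held while sleeping. */
	btrfs_tree_read_lock(eb);
	do_sleeping_work(eb);
	btrfs_tree_read_unlock(eb);

The write side (btrfs_set_lock_blocking_write()) and the whole-path helper (btrfs_set_path_blocking()) collapse the same way, which is what the hunks below do mechanically.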
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1341,14 +1341,12 @@ again:
 				goto out;
 			}
 
-			if (!path->skip_locking) {
+			if (!path->skip_locking)
 				btrfs_tree_read_lock(eb);
-				btrfs_set_lock_blocking_read(eb);
-			}
 			ret = find_extent_in_eb(eb, bytenr,
 						*extent_item_pos, &eie, ignore_offset);
 			if (!path->skip_locking)
-				btrfs_tree_read_unlock_blocking(eb);
+				btrfs_tree_read_unlock(eb);
 			free_extent_buffer(eb);
 			if (ret < 0)
 				goto out;
@@ -1685,7 +1683,7 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 					   name_off, name_len);
 		if (eb != eb_in) {
 			if (!path->skip_locking)
-				btrfs_tree_read_unlock_blocking(eb);
+				btrfs_tree_read_unlock(eb);
 			free_extent_buffer(eb);
 		}
 		ret = btrfs_find_item(fs_root, path, parent, 0,
@@ -1705,8 +1703,6 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
 		eb = path->nodes[0];
 		/* make sure we can use eb after releasing the path */
 		if (eb != eb_in) {
-			if (!path->skip_locking)
-				btrfs_set_lock_blocking_read(eb);
 			path->nodes[0] = NULL;
 			path->locks[0] = 0;
 		}
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1278,14 +1278,11 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 	if (!tm)
 		return eb;
 
-	btrfs_set_path_blocking(path);
-	btrfs_set_lock_blocking_read(eb);
-
 	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
 		BUG_ON(tm->slot != 0);
 		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
 		if (!eb_rewin) {
-			btrfs_tree_read_unlock_blocking(eb);
+			btrfs_tree_read_unlock(eb);
 			free_extent_buffer(eb);
 			return NULL;
 		}
@@ -1297,13 +1294,13 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 	} else {
 		eb_rewin = btrfs_clone_extent_buffer(eb);
 		if (!eb_rewin) {
-			btrfs_tree_read_unlock_blocking(eb);
+			btrfs_tree_read_unlock(eb);
 			free_extent_buffer(eb);
 			return NULL;
 		}
 	}
 
-	btrfs_tree_read_unlock_blocking(eb);
+	btrfs_tree_read_unlock(eb);
 	free_extent_buffer(eb);
 
 	btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb_rewin),
@@ -1373,9 +1370,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		free_extent_buffer(eb_root);
 		eb = alloc_dummy_extent_buffer(fs_info, logical);
 	} else {
-		btrfs_set_lock_blocking_read(eb_root);
 		eb = btrfs_clone_extent_buffer(eb_root);
-		btrfs_tree_read_unlock_blocking(eb_root);
+		btrfs_tree_read_unlock(eb_root);
 		free_extent_buffer(eb_root);
 	}
 
@@ -1483,10 +1479,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
 
 	search_start = buf->start & ~((u64)SZ_1G - 1);
 
-	if (parent)
-		btrfs_set_lock_blocking_write(parent);
-	btrfs_set_lock_blocking_write(buf);
-
 	/*
 	 * Before CoWing this block for later modification, check if it's
 	 * the subtree root and do the delayed subtree trace if needed.
@@ -1604,8 +1596,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 	if (parent_nritems <= 1)
 		return 0;
 
-	btrfs_set_lock_blocking_write(parent);
-
 	for (i = start_slot; i <= end_slot; i++) {
 		struct btrfs_key first_key;
 		int close = 1;
@@ -1663,7 +1653,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 			search_start = last_block;
 
 		btrfs_tree_lock(cur);
-		btrfs_set_lock_blocking_write(cur);
 		err = __btrfs_cow_block(trans, root, cur, parent, i,
 					&cur, search_start,
 					min(16 * blocksize,
@@ -1835,8 +1824,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 
 	mid = path->nodes[level];
 
-	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
-		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
+	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK);
 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
 
 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
@@ -1865,7 +1853,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 		}
 
 		btrfs_tree_lock(child);
-		btrfs_set_lock_blocking_write(child);
 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child,
 				      BTRFS_NESTING_COW);
 		if (ret) {
@@ -1904,7 +1891,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 
 	if (left) {
 		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
-		btrfs_set_lock_blocking_write(left);
 		wret = btrfs_cow_block(trans, root, left,
 				       parent, pslot - 1, &left,
 				       BTRFS_NESTING_LEFT_COW);
@@ -1920,7 +1906,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 
 	if (right) {
 		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
-		btrfs_set_lock_blocking_write(right);
 		wret = btrfs_cow_block(trans, root, right,
 				       parent, pslot + 1, &right,
 				       BTRFS_NESTING_RIGHT_COW);
@@ -2084,7 +2069,6 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		u32 left_nr;
 
 		__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
-		btrfs_set_lock_blocking_write(left);
 
 		left_nr = btrfs_header_nritems(left);
 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -2139,7 +2123,6 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
 		u32 right_nr;
 
 		__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
-		btrfs_set_lock_blocking_write(right);
 
 		right_nr = btrfs_header_nritems(right);
 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
@@ -2399,14 +2382,6 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
 			return 0;
 		}
 
-		/* the pages were up to date, but we failed
-		 * the generation number check.  Do a full
-		 * read for the generation number that is correct.
-		 * We must do this without dropping locks so
-		 * we can trust our generation number
-		 */
-		btrfs_set_path_blocking(p);
-
-		/* now we're allowed to do a blocking uptodate check */
 		ret = btrfs_read_buffer(tmp, gen, parent_level - 1, &first_key);
 		if (!ret) {
@@ -2426,7 +2401,6 @@ read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
 	 * out which blocks to read.
 	 */
 	btrfs_unlock_up_safe(p, level + 1);
-	btrfs_set_path_blocking(p);
 
 	if (p->reada != READA_NONE)
 		reada_for_search(fs_info, p, level, slot, key->objectid);
@@ -2480,7 +2454,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 			goto again;
 		}
 
-		btrfs_set_path_blocking(p);
 		reada_for_balance(fs_info, p, level);
 		sret = split_node(trans, root, p, level);
 
@@ -2500,7 +2473,6 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 			goto again;
 		}
 
-		btrfs_set_path_blocking(p);
 		reada_for_balance(fs_info, p, level);
 		sret = balance_level(trans, root, p, level);
 
@@ -2752,7 +2724,6 @@ again:
 				goto again;
 			}
 
-			btrfs_set_path_blocking(p);
 			if (last_level)
 				err = btrfs_cow_block(trans, root, b, NULL, 0,
 						      &b,
@@ -2822,7 +2793,6 @@ cow_done:
 					goto again;
 				}
 
-				btrfs_set_path_blocking(p);
 				err = split_leaf(trans, root, key,
 						 p, ins_len, ret == 0);
 
@@ -2884,17 +2854,11 @@ cow_done:
 		if (!p->skip_locking) {
 			level = btrfs_header_level(b);
 			if (level <= write_lock_level) {
-				if (!btrfs_try_tree_write_lock(b)) {
-					btrfs_set_path_blocking(p);
-					btrfs_tree_lock(b);
-				}
+				btrfs_tree_lock(b);
 				p->locks[level] = BTRFS_WRITE_LOCK;
 			} else {
-				if (!btrfs_tree_read_lock_atomic(b)) {
-					btrfs_set_path_blocking(p);
-					__btrfs_tree_read_lock(b, BTRFS_NESTING_NORMAL,
-							       p->recurse);
-				}
+				__btrfs_tree_read_lock(b, BTRFS_NESTING_NORMAL,
+						       p->recurse);
 				p->locks[level] = BTRFS_READ_LOCK;
 			}
 			p->nodes[level] = b;
@@ -2902,12 +2866,6 @@ cow_done:
 	}
 	ret = 1;
 done:
-	/*
-	 * we don't really know what they plan on doing with the path
-	 * from here on, so for now just mark it as blocking
-	 */
-	if (!p->leave_spinning)
-		btrfs_set_path_blocking(p);
 	if (ret < 0 && !p->skip_release_on_error)
 		btrfs_release_path(p);
 	return ret;
@@ -2999,10 +2957,7 @@ again:
 	}
 
 	level = btrfs_header_level(b);
-	if (!btrfs_tree_read_lock_atomic(b)) {
-		btrfs_set_path_blocking(p);
-		btrfs_tree_read_lock(b);
-	}
+	btrfs_tree_read_lock(b);
 	b = tree_mod_log_rewind(fs_info, p, b, time_seq);
 	if (!b) {
 		ret = -ENOMEM;
@@ -3013,8 +2968,6 @@ again:
 	}
 	ret = 1;
 done:
-	if (!p->leave_spinning)
-		btrfs_set_path_blocking(p);
 	if (ret < 0)
 		btrfs_release_path(p);
 
@@ -3441,7 +3394,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	add_root_to_dirty_list(root);
 	atomic_inc(&c->refs);
 	path->nodes[level] = c;
-	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+	path->locks[level] = BTRFS_WRITE_LOCK;
 	path->slots[level] = 0;
 	return 0;
 }
@@ -3814,7 +3767,6 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 		return 1;
 
 	__btrfs_tree_lock(right, BTRFS_NESTING_RIGHT);
-	btrfs_set_lock_blocking_write(right);
 
 	free_space = btrfs_leaf_free_space(right);
 	if (free_space < data_size)
@@ -4053,7 +4005,6 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
 		return 1;
 
 	__btrfs_tree_lock(left, BTRFS_NESTING_LEFT);
-	btrfs_set_lock_blocking_write(left);
 
 	free_space = btrfs_leaf_free_space(left);
 	if (free_space < data_size) {
@@ -4448,7 +4399,6 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
 		goto err;
 	}
 
-	btrfs_set_path_blocking(path);
 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
 	if (ret)
 		goto err;
@@ -4478,8 +4428,6 @@ static noinline int split_item(struct btrfs_path *path,
 	leaf = path->nodes[0];
 	BUG_ON(btrfs_leaf_free_space(leaf) < sizeof(struct btrfs_item));
 
-	btrfs_set_path_blocking(path);
-
 	item = btrfs_item_nr(path->slots[0]);
 	orig_offset = btrfs_item_offset(leaf, item);
 	item_size = btrfs_item_size(leaf, item);
@@ -5055,7 +5003,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			if (leaf == root->node) {
 				btrfs_set_header_level(leaf, 0);
 			} else {
-				btrfs_set_path_blocking(path);
 				btrfs_clean_tree_block(leaf);
 				btrfs_del_leaf(trans, root, path, leaf);
 			}
@@ -5077,7 +5024,6 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			slot = path->slots[1];
 			atomic_inc(&leaf->refs);
 
-			btrfs_set_path_blocking(path);
 			wret = push_leaf_left(trans, root, path, 1, 1,
 					      1, (u32)-1);
 			if (wret < 0 && wret != -ENOSPC)
@@ -5248,7 +5194,6 @@ find_next_key:
 		 */
 		if (slot >= nritems) {
 			path->slots[level] = slot;
-			btrfs_set_path_blocking(path);
 			sret = btrfs_find_next_key(root, path, min_key, level,
 						  min_trans);
 			if (sret == 0) {
@@ -5265,7 +5210,6 @@ find_next_key:
 			ret = 0;
 			goto out;
 		}
-		btrfs_set_path_blocking(path);
 		cur = btrfs_read_node_slot(cur, slot);
 		if (IS_ERR(cur)) {
 			ret = PTR_ERR(cur);
@@ -5282,7 +5226,6 @@ out:
 	path->keep_locks = keep_locks;
 	if (ret == 0) {
 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
-		btrfs_set_path_blocking(path);
 		memcpy(min_key, &found_key, sizeof(found_key));
 	}
 	return ret;
@@ -5492,7 +5435,6 @@ again:
 			goto again;
 		}
 		if (!ret) {
-			btrfs_set_path_blocking(path);
 			__btrfs_tree_read_lock(next,
 					       BTRFS_NESTING_RIGHT,
 					       path->recurse);
@@ -5527,13 +5469,8 @@ again:
 		}
 
 		if (!path->skip_locking) {
-			ret = btrfs_try_tree_read_lock(next);
-			if (!ret) {
-				btrfs_set_path_blocking(path);
-				__btrfs_tree_read_lock(next,
-						       BTRFS_NESTING_RIGHT,
-						       path->recurse);
-			}
+			__btrfs_tree_read_lock(next, BTRFS_NESTING_RIGHT,
+					       path->recurse);
 			next_rw_lock = BTRFS_READ_LOCK;
 		}
 	}
@@ -5541,8 +5478,6 @@ again:
 done:
 	unlock_up(path, 0, 1, 0, NULL);
 	path->leave_spinning = old_spinning;
-	if (!old_spinning)
-		btrfs_set_path_blocking(path);
 
 	return ret;
 }
@@ -5564,7 +5499,6 @@ int btrfs_previous_item(struct btrfs_root *root,
 
 	while (1) {
 		if (path->slots[0] == 0) {
-			btrfs_set_path_blocking(path);
 			ret = btrfs_prev_leaf(root, path);
 			if (ret != 0)
 				return ret;
@@ -5606,7 +5540,6 @@ int btrfs_previous_extent_item(struct btrfs_root *root,
 
 	while (1) {
 		if (path->slots[0] == 0) {
-			btrfs_set_path_blocking(path);
 			ret = btrfs_prev_leaf(root, path);
 			if (ret != 0)
 				return ret;
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -740,13 +740,6 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
 		goto out;
 	}
 
-	/*
-	 * we need allocate some memory space, but it might cause the task
-	 * to sleep, so we set all locked nodes in the path to blocking locks
-	 * first.
-	 */
-	btrfs_set_path_blocking(path);
-
 	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
 	if (!keys) {
 		ret = -ENOMEM;
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -250,10 +250,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 	if (atomic)
 		return -EAGAIN;
 
-	if (need_lock) {
+	if (need_lock)
 		btrfs_tree_read_lock(eb);
-		btrfs_set_lock_blocking_read(eb);
-	}
 
 	lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
 			 &cached_state);
@@ -282,7 +280,7 @@ out:
 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 			     &cached_state);
 	if (need_lock)
-		btrfs_tree_read_unlock_blocking(eb);
+		btrfs_tree_read_unlock(eb);
 	return ret;
 }
 
@@ -1013,8 +1011,6 @@ void btrfs_clean_tree_block(struct extent_buffer *buf)
 		percpu_counter_add_batch(&fs_info->dirty_metadata_bytes,
 					 -buf->len,
 					 fs_info->dirty_metadata_batch);
-		/* ugh, clear_extent_buffer_dirty needs to lock the page */
-		btrfs_set_lock_blocking_write(buf);
 		clear_extent_buffer_dirty(buf);
 	}
 }
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4665,7 +4665,6 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	btrfs_clean_tree_block(buf);
 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
 
-	btrfs_set_lock_blocking_write(buf);
 	set_extent_buffer_uptodate(buf);
 
 	memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
@@ -5054,7 +5053,6 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 		reada = 1;
 	}
 	btrfs_tree_lock(next);
-	btrfs_set_lock_blocking_write(next);
 
 	ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, level - 1, 1,
 				       &wc->refs[level - 1],
@@ -5114,7 +5112,6 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 			return -EIO;
 		}
 		btrfs_tree_lock(next);
-		btrfs_set_lock_blocking_write(next);
 	}
 
 	level--;
@@ -5126,7 +5123,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 	}
 	path->nodes[level] = next;
 	path->slots[level] = 0;
-	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+	path->locks[level] = BTRFS_WRITE_LOCK;
 	wc->level = level;
 	if (wc->level == 1)
 		wc->reada_slot = 0;
@@ -5254,8 +5251,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 		if (!path->locks[level]) {
 			BUG_ON(level == 0);
 			btrfs_tree_lock(eb);
-			btrfs_set_lock_blocking_write(eb);
-			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_WRITE_LOCK;
 
 			ret = btrfs_lookup_extent_info(trans, fs_info,
 						       eb->start, level, 1,
@@ -5298,8 +5294,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
 		if (!path->locks[level] &&
 		    btrfs_header_generation(eb) == trans->transid) {
 			btrfs_tree_lock(eb);
-			btrfs_set_lock_blocking_write(eb);
-			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_WRITE_LOCK;
 		}
 		btrfs_clean_tree_block(eb);
 	}
@@ -5467,9 +5462,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
 		level = btrfs_header_level(root->node);
 		path->nodes[level] = btrfs_lock_root_node(root);
-		btrfs_set_lock_blocking_write(path->nodes[level]);
 		path->slots[level] = 0;
-		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+		path->locks[level] = BTRFS_WRITE_LOCK;
 		memset(&wc->update_progress, 0,
 		       sizeof(wc->update_progress));
 	} else {
@@ -5497,8 +5491,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc)
 		level = btrfs_header_level(root->node);
 		while (1) {
 			btrfs_tree_lock(path->nodes[level]);
-			btrfs_set_lock_blocking_write(path->nodes[level]);
-			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_WRITE_LOCK;
 
 			ret = btrfs_lookup_extent_info(trans, fs_info,
 						       path->nodes[level]->start,
@@ -5685,7 +5678,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
 	level = btrfs_header_level(node);
 	path->nodes[level] = node;
 	path->slots[level] = 0;
-	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
+	path->locks[level] = BTRFS_WRITE_LOCK;
 
 	wc->refs[parent_level] = 1;
 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -991,8 +991,7 @@ delete_extent_item:
 	 * write lock.
 	 */
 	if (!ret && replace_extent && leafs_visited == 1 &&
-	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
-	     path->locks[0] == BTRFS_WRITE_LOCK) &&
+	    path->locks[0] == BTRFS_WRITE_LOCK &&
 	    btrfs_leaf_free_space(leaf) >=
 	    sizeof(struct btrfs_item) + extent_item_size) {
 
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6789,7 +6789,6 @@ next:
 		em->orig_start = em->start;
 		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
 
-		btrfs_set_path_blocking(path);
 		if (!PageUptodate(page)) {
 			if (btrfs_file_extent_compression(leaf, item) !=
 			    BTRFS_COMPRESS_NONE) {
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -50,31 +50,6 @@
  *
  */
 
-/*
- * Mark already held read lock as blocking. Can be nested in write lock by the
- * same thread.
- *
- * Use when there are potentially long operations ahead so other thread waiting
- * on the lock will not actively spin but sleep instead.
- *
- * The rwlock is released and blocking reader counter is increased.
- */
-void btrfs_set_lock_blocking_read(struct extent_buffer *eb)
-{
-}
-
-/*
- * Mark already held write lock as blocking.
- *
- * Use when there are potentially long operations ahead so other threads
- * waiting on the lock will not actively spin but sleep instead.
- *
- * The rwlock is released and blocking writers is set.
- */
-void btrfs_set_lock_blocking_write(struct extent_buffer *eb)
-{
-}
-
 /*
  * __btrfs_tree_read_lock - lock extent buffer for read
  * @eb: the eb to be locked
@@ -130,17 +105,6 @@ void btrfs_tree_read_lock(struct extent_buffer *eb)
 	__btrfs_tree_read_lock(eb, BTRFS_NESTING_NORMAL, false);
 }
 
-/*
- * Lock extent buffer for read, optimistically expecting that there are no
- * contending blocking writers. If there are, don't wait.
- *
- * Return 1 if the rwlock has been taken, 0 otherwise
- */
-int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
-{
-	return btrfs_try_tree_read_lock(eb);
-}
-
 /*
  * Try-lock for read.
  *
@@ -192,18 +156,6 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
 	up_read(&eb->lock);
 }
 
-/*
- * Release read lock, previously set to blocking by a pairing call to
- * btrfs_set_lock_blocking_read(). Can be nested in write lock by the same
- * thread.
- *
- * State of rwlock is unchanged, last reader wakes waiting threads.
- */
-void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
-{
-	btrfs_tree_read_unlock(eb);
-}
-
 /*
  * __btrfs_tree_lock - lock eb for write
  * @eb: the eb to lock
@@ -239,32 +191,6 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 	up_write(&eb->lock);
 }
 
-/*
- * Set all locked nodes in the path to blocking locks.  This should be done
- * before scheduling
- */
-void btrfs_set_path_blocking(struct btrfs_path *p)
-{
-	int i;
-
-	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
-		if (!p->nodes[i] || !p->locks[i])
-			continue;
-		/*
-		 * If we currently have a spinning reader or writer lock this
-		 * will bump the count of blocking holders and drop the
-		 * spinlock.
-		 */
-		if (p->locks[i] == BTRFS_READ_LOCK) {
-			btrfs_set_lock_blocking_read(p->nodes[i]);
-			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
-		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
-			btrfs_set_lock_blocking_write(p->nodes[i]);
-			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
-		}
-	}
-}
-
 /*
  * This releases any locks held in the path starting at level and going all the
  * way up to the root.
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -13,8 +13,6 @@
 
 #define BTRFS_WRITE_LOCK 1
 #define BTRFS_READ_LOCK 2
-#define BTRFS_WRITE_LOCK_BLOCKING 3
-#define BTRFS_READ_LOCK_BLOCKING 4
 
 /*
  * We are limited in number of subclasses by MAX_LOCKDEP_SUBCLASSES, which at
@@ -93,12 +91,8 @@ void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting ne
 			    bool recurse);
 void btrfs_tree_read_lock(struct extent_buffer *eb);
 void btrfs_tree_read_unlock(struct extent_buffer *eb);
-void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
-void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
-int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
 struct extent_buffer *__btrfs_read_lock_root_node(struct btrfs_root *root,
 						  bool recurse);
@@ -116,15 +110,12 @@ static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
 static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
 #endif
 
-void btrfs_set_path_blocking(struct btrfs_path *p);
 void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
 
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
-	if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
+	if (rw == BTRFS_WRITE_LOCK)
 		btrfs_tree_unlock(eb);
-	else if (rw == BTRFS_READ_LOCK_BLOCKING)
-		btrfs_tree_read_unlock_blocking(eb);
 	else if (rw == BTRFS_READ_LOCK)
 		btrfs_tree_read_unlock(eb);
 	else
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1970,8 +1970,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
 		src_path->nodes[cur_level] = eb;
 
 		btrfs_tree_read_lock(eb);
-		btrfs_set_lock_blocking_read(eb);
-		src_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+		src_path->locks[cur_level] = BTRFS_READ_LOCK;
 	}
 
 	src_path->slots[cur_level] = dst_path->slots[cur_level];
@@ -2111,8 +2110,7 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
 		dst_path->slots[cur_level] = 0;
 
 		btrfs_tree_read_lock(eb);
-		btrfs_set_lock_blocking_read(eb);
-		dst_path->locks[cur_level] = BTRFS_READ_LOCK_BLOCKING;
+		dst_path->locks[cur_level] = BTRFS_READ_LOCK;
 		need_cleanup = true;
 	}
 
@@ -2286,8 +2284,7 @@ walk_down:
 			path->slots[level] = 0;
 
 			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_read(eb);
-			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+			path->locks[level] = BTRFS_READ_LOCK;
 
 			ret = btrfs_qgroup_trace_extent(trans, child_bytenr,
 							fs_info->nodesize,
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -575,10 +575,9 @@ static int walk_down_tree(struct btrfs_root *root, struct btrfs_path *path,
 				return -EIO;
 			}
 			btrfs_tree_read_lock(eb);
-			btrfs_set_lock_blocking_read(eb);
 			path->nodes[level-1] = eb;
 			path->slots[level-1] = 0;
-			path->locks[level-1] = BTRFS_READ_LOCK_BLOCKING;
+			path->locks[level-1] = BTRFS_READ_LOCK;
 		} else {
 			ret = process_leaf(root, path, bytenr, num_bytes);
 			if (ret)
@@ -1000,11 +999,10 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
 		return -ENOMEM;
 
 	eb = btrfs_read_lock_root_node(fs_info->extent_root);
-	btrfs_set_lock_blocking_read(eb);
 	level = btrfs_header_level(eb);
 	path->nodes[level] = eb;
 	path->slots[level] = 0;
-	path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+	path->locks[level] = BTRFS_READ_LOCK;
 
 	while (1) {
 		/*
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1196,7 +1196,6 @@ again:
 	btrfs_node_key_to_cpu(path->nodes[lowest_level], &key, slot);
 
 	eb = btrfs_lock_root_node(dest);
-	btrfs_set_lock_blocking_write(eb);
 	level = btrfs_header_level(eb);
 
 	if (level < lowest_level) {
@@ -1210,7 +1209,6 @@ again:
 				      BTRFS_NESTING_COW);
 		BUG_ON(ret);
 	}
-	btrfs_set_lock_blocking_write(eb);
 
 	if (next_key) {
 		next_key->objectid = (u64)-1;
@@ -1279,7 +1277,6 @@ again:
 					      BTRFS_NESTING_COW);
 			BUG_ON(ret);
 		}
-		btrfs_set_lock_blocking_write(eb);
 
 		btrfs_tree_unlock(parent);
 		free_extent_buffer(parent);
@@ -2309,7 +2306,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 			goto next;
 		}
 		btrfs_tree_lock(eb);
-		btrfs_set_lock_blocking_write(eb);
 
 		if (!node->eb) {
 			ret = btrfs_cow_block(trans, root, eb, upper->eb,
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1598,8 +1598,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 		goto fail;
 	}
 
-	btrfs_set_lock_blocking_write(old);
-
 	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
 	/* clean up in any case */
 	btrfs_tree_unlock(old);
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -52,7 +52,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 	u32 nritems;
 
 	root_node = btrfs_lock_root_node(root);
-	btrfs_set_lock_blocking_write(root_node);
 	nritems = btrfs_header_nritems(root_node);
 	root->defrag_max.objectid = 0;
 	/* from above we know this is not a leaf */
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2722,7 +2722,6 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
 
 			if (trans) {
 				btrfs_tree_lock(next);
-				btrfs_set_lock_blocking_write(next);
 				btrfs_clean_tree_block(next);
 				btrfs_wait_tree_block_writeback(next);
 				btrfs_tree_unlock(next);
@@ -2791,7 +2790,6 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
 
 			if (trans) {
 				btrfs_tree_lock(next);
-				btrfs_set_lock_blocking_write(next);
 				btrfs_clean_tree_block(next);
 				btrfs_wait_tree_block_writeback(next);
 				btrfs_tree_unlock(next);
@@ -2873,7 +2871,6 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 
 	if (trans) {
 		btrfs_tree_lock(next);
-		btrfs_set_lock_blocking_write(next);
 		btrfs_clean_tree_block(next);
 		btrfs_wait_tree_block_writeback(next);
 		btrfs_tree_unlock(next);