btrfs: take an fs_info directly when the root is not used otherwise

There are loads of functions in btrfs that accept a root parameter
but only use it to obtain an fs_info pointer.  Let's convert those to
just accept an fs_info pointer directly.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Jeff Mahoney, 2016-06-22 18:54:24 -04:00; committed by David Sterba
Parent: afdb571890
Commit: 2ff7e61e0d
44 changed files: 1119 additions and 1243 deletions
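The shape of the conversion is the same across all of the touched files: a helper that only ever dereferenced root->fs_info now takes the fs_info pointer itself, and callers that still hold a root simply pass root->fs_info. Below is a minimal, self-contained sketch of that before/after pattern; the struct layouts and function names are placeholders for illustration, not the kernel definitions.

#include <stdio.h>

/* Placeholder stand-ins for the kernel structures, just enough to make
 * the shape of the change compile outside the kernel tree. */
struct btrfs_fs_info { unsigned int nodesize; };
struct btrfs_root { struct btrfs_fs_info *fs_info; };

/* Before: the root is used only to reach fs_info. */
static unsigned int nodesize_old(struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        return fs_info->nodesize;
}

/* After: take fs_info directly; callers holding a root pass root->fs_info. */
static unsigned int nodesize_new(struct btrfs_fs_info *fs_info)
{
        return fs_info->nodesize;
}

int main(void)
{
        struct btrfs_fs_info fs_info = { .nodesize = 16384 };
        struct btrfs_root root = { .fs_info = &fs_info };

        printf("%u %u\n", nodesize_old(&root), nodesize_new(root.fs_info));
        return 0;
}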

fs/btrfs/backref.c

@ -788,8 +788,7 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
if (ref->key_for_search.type)
continue;
BUG_ON(!ref->wanted_disk_byte);
eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
0);
eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
@ -1405,8 +1404,7 @@ again:
ref->level == 0) {
struct extent_buffer *eb;
eb = read_tree_block(fs_info->extent_root,
ref->parent, 0);
eb = read_tree_block(fs_info, ref->parent, 0);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;

fs/btrfs/check-integrity.c

@ -2904,14 +2904,13 @@ int btrfsic_submit_bio_wait(struct bio *bio)
return submit_bio_wait(bio);
}
int btrfsic_mount(struct btrfs_root *root,
int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask)
{
int ret;
struct btrfsic_state *state;
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *device;
if (fs_info->nodesize & ((u64)PAGE_SIZE - 1)) {
@ -2939,7 +2938,7 @@ int btrfsic_mount(struct btrfs_root *root,
btrfsic_is_initialized = 1;
}
mutex_lock(&btrfsic_mutex);
state->fs_info = root->fs_info;
state->fs_info = fs_info;
state->print_mask = print_mask;
state->include_extent_data = including_extent_data;
state->csum_size = 0;
@ -2977,7 +2976,7 @@ int btrfsic_mount(struct btrfs_root *root,
ret = btrfsic_process_superblock(state, fs_devices);
if (0 != ret) {
mutex_unlock(&btrfsic_mutex);
btrfsic_unmount(root, fs_devices);
btrfsic_unmount(fs_devices);
return ret;
}
@ -2990,8 +2989,7 @@ int btrfsic_mount(struct btrfs_root *root,
return 0;
}
void btrfsic_unmount(struct btrfs_root *root,
struct btrfs_fs_devices *fs_devices)
void btrfsic_unmount(struct btrfs_fs_devices *fs_devices)
{
struct btrfsic_block *b_all, *tmp_all;
struct btrfsic_state *state;

fs/btrfs/check-integrity.h

@ -29,10 +29,9 @@ int btrfsic_submit_bio_wait(struct bio *bio);
#define btrfsic_submit_bio_wait submit_bio_wait
#endif
int btrfsic_mount(struct btrfs_root *root,
int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask);
void btrfsic_unmount(struct btrfs_root *root,
struct btrfs_fs_devices *fs_devices);
void btrfsic_unmount(struct btrfs_fs_devices *fs_devices);
#endif
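With the header change above, the integrity checker's entry points no longer see a root at all, and btrfsic_unmount() needs nothing but the device list. A hedged sketch of what a call site looks like after the change follows; the stub functions and the open/close flow are illustrative stand-ins, not the actual disk-io.c callers.

#include <stdio.h>

/* Dummy types and stub bodies; only the new prototype shapes are taken
 * from check-integrity.h. */
struct btrfs_fs_info { int dummy; };
struct btrfs_fs_devices { int dummy; };

static int btrfsic_mount_stub(struct btrfs_fs_info *fs_info,
                              struct btrfs_fs_devices *fs_devices,
                              int including_extent_data,
                              unsigned int print_mask)
{
        (void)fs_info; (void)fs_devices;
        (void)including_extent_data; (void)print_mask;
        return 0;
}

static void btrfsic_unmount_stub(struct btrfs_fs_devices *fs_devices)
{
        (void)fs_devices;
}

int main(void)
{
        struct btrfs_fs_info fs_info = { 0 };
        struct btrfs_fs_devices fs_devices = { 0 };

        /* Mount side: the caller hands over fs_info, not a tree root. */
        int ret = btrfsic_mount_stub(&fs_info, &fs_devices, 1, 0);

        /* Unmount side: only the device list is needed any more. */
        if (!ret)
                btrfsic_unmount_stub(&fs_devices);
        return ret;
}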

fs/btrfs/compression.c

@ -85,10 +85,9 @@ static int btrfs_decompress_bio(int type, struct page **pages_in,
u64 disk_start, struct bio *orig_bio,
size_t srclen);
static inline int compressed_bio_size(struct btrfs_root *root,
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
unsigned long disk_size)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
return sizeof(struct compressed_bio) +
@ -331,7 +330,6 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct compressed_bio *cb;
unsigned long bytes_left;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@ -343,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
return -ENOMEM;
atomic_set(&cb->pending_bios, 0);
@ -398,12 +396,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio,
start, 1);
ret = btrfs_csum_one_bio(inode, bio, start, 1);
BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, bio, 0, 1);
ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@ -433,11 +430,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
ret = btrfs_csum_one_bio(inode, bio, start, 1);
BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, bio, 0, 1);
ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@ -581,7 +578,6 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
struct extent_io_tree *tree;
struct extent_map_tree *em_tree;
struct compressed_bio *cb;
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long compressed_len;
unsigned long nr_pages;
unsigned long pg_index;
@ -609,7 +605,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
return -EIO;
compressed_len = em->block_len;
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
goto out;
@ -694,14 +690,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
atomic_inc(&cb->pending_bios);
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(root, inode,
comp_bio, sums);
ret = btrfs_lookup_bio_sums(inode, comp_bio,
sums);
BUG_ON(ret); /* -ENOMEM */
}
sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
fs_info->sectorsize);
ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) {
comp_bio->bi_error = ret;
bio_endio(comp_bio);
@ -726,11 +722,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
BUG_ON(ret); /* -ENOMEM */
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) {
comp_bio->bi_error = ret;
bio_endio(comp_bio);
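In compression.c the bios are now checksummed and mapped without a root: btrfs_csum_one_bio() and btrfs_lookup_bio_sums() take the inode, btrfs_map_bio() takes fs_info, and fs_info itself comes from the inode's superblock. A compilable sketch of that derivation, with placeholder structs (in the kernel, btrfs_sb() likewise returns the fs_info stored in sb->s_fs_info):

#include <stdio.h>

/* Placeholder stand-ins; only the pointer plumbing matters here. */
struct btrfs_fs_info { unsigned int sectorsize; };
struct super_block { void *s_fs_info; };
struct inode { struct super_block *i_sb; };

/* Same idea as the kernel's btrfs_sb(): fs_info lives in sb->s_fs_info. */
static struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
        return sb->s_fs_info;
}

int main(void)
{
        struct btrfs_fs_info fsi = { .sectorsize = 4096 };
        struct super_block sb = { .s_fs_info = &fsi };
        struct inode inode = { .i_sb = &sb };

        /* What btrfs_submit_compressed_write/read do now: no root needed,
         * fs_info comes straight from the inode and is then passed on to
         * compressed_bio_size() and btrfs_map_bio(). */
        struct btrfs_fs_info *fs_info = btrfs_sb(inode.i_sb);

        printf("sectorsize %u\n", fs_info->sectorsize);
        return 0;
}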

fs/btrfs/ctree.c

@ -32,10 +32,11 @@ static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *ins_key,
struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *dst,
struct btrfs_fs_info *fs_info,
struct extent_buffer *dst,
struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct extent_buffer *dst_buf,
struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
@ -1005,7 +1006,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
*/
if (btrfs_block_can_be_shared(root, buf)) {
ret = btrfs_lookup_extent_info(trans, root, buf->start,
ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
btrfs_header_level(buf), 1,
&refs, &flags);
if (ret)
@ -1055,7 +1056,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (new_flags != 0) {
int level = btrfs_header_level(buf);
ret = btrfs_set_disk_extent_flags(trans, root,
ret = btrfs_set_disk_extent_flags(trans, fs_info,
buf->start,
buf->len,
new_flags, level, 0);
@ -1431,7 +1432,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
btrfs_tree_read_unlock(eb_root);
free_extent_buffer(eb_root);
old = read_tree_block(root, logical, 0);
old = read_tree_block(fs_info, logical, 0);
if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
if (!IS_ERR(old))
free_extent_buffer(old);
@ -1682,7 +1683,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
uptodate = 0;
if (!cur || !uptodate) {
if (!cur) {
cur = read_tree_block(root, blocknr, gen);
cur = read_tree_block(fs_info, blocknr, gen);
if (IS_ERR(cur)) {
return PTR_ERR(cur);
} else if (!extent_buffer_uptodate(cur)) {
@ -1843,8 +1844,9 @@ static void root_sub_used(struct btrfs_root *root, u32 size)
/* given a node and slot number, this reads the blocks it points to. The
* extent buffer is returned with a reference taken (but unlocked).
*/
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
struct extent_buffer *parent, int slot)
static noinline struct extent_buffer *
read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
int slot)
{
int level = btrfs_header_level(parent);
struct extent_buffer *eb;
@ -1854,7 +1856,7 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
BUG_ON(level == 0);
eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
btrfs_node_ptr_generation(parent, slot));
if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
@ -1911,7 +1913,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
return 0;
/* promote the child to a root */
child = read_node_slot(root, mid, 0);
child = read_node_slot(fs_info, mid, 0);
if (IS_ERR(child)) {
ret = PTR_ERR(child);
btrfs_handle_fs_error(fs_info, ret, NULL);
@ -1950,7 +1952,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
return 0;
left = read_node_slot(root, parent, pslot - 1);
left = read_node_slot(fs_info, parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
@ -1965,7 +1967,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
}
}
right = read_node_slot(root, parent, pslot + 1);
right = read_node_slot(fs_info, parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
@ -1983,7 +1985,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* first, try to make some room in the middle buffer */
if (left) {
orig_slot += btrfs_header_nritems(left);
wret = push_node_left(trans, root, left, mid, 1);
wret = push_node_left(trans, fs_info, left, mid, 1);
if (wret < 0)
ret = wret;
}
@ -1992,7 +1994,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
* then try to empty the right most buffer into the middle
*/
if (right) {
wret = push_node_left(trans, root, mid, right, 1);
wret = push_node_left(trans, fs_info, mid, right, 1);
if (wret < 0 && wret != -ENOSPC)
ret = wret;
if (btrfs_header_nritems(right) == 0) {
@ -2027,13 +2029,13 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
btrfs_handle_fs_error(fs_info, ret, NULL);
goto enospc;
}
wret = balance_node_right(trans, root, mid, left);
wret = balance_node_right(trans, fs_info, mid, left);
if (wret < 0) {
ret = wret;
goto enospc;
}
if (wret == 1) {
wret = push_node_left(trans, root, left, mid, 1);
wret = push_node_left(trans, fs_info, left, mid, 1);
if (wret < 0)
ret = wret;
}
@ -2122,7 +2124,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (!parent)
return 1;
left = read_node_slot(root, parent, pslot - 1);
left = read_node_slot(fs_info, parent, pslot - 1);
if (IS_ERR(left))
left = NULL;
@ -2142,7 +2144,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (ret)
wret = 1;
else {
wret = push_node_left(trans, root,
wret = push_node_left(trans, fs_info,
left, mid, 0);
}
}
@ -2173,7 +2175,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(left);
free_extent_buffer(left);
}
right = read_node_slot(root, parent, pslot + 1);
right = read_node_slot(fs_info, parent, pslot + 1);
if (IS_ERR(right))
right = NULL;
@ -2196,7 +2198,7 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
if (ret)
wret = 1;
else {
wret = balance_node_right(trans, root,
wret = balance_node_right(trans, fs_info,
right, mid);
}
}
@ -2234,11 +2236,10 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
* readahead one full node of leaves, finding things that are close
* to the block in 'slot', and triggering ra on them.
*/
static void reada_for_search(struct btrfs_root *root,
static void reada_for_search(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int level, int slot, u64 objectid)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *node;
struct btrfs_disk_key disk_key;
u32 nritems;
@ -2289,7 +2290,7 @@ static void reada_for_search(struct btrfs_root *root,
search = btrfs_node_blockptr(node, nr);
if ((search <= target && target - search <= 65536) ||
(search > target && search - target <= 65536)) {
readahead_tree_block(root, search);
readahead_tree_block(fs_info, search);
nread += blocksize;
}
nscan++;
@ -2298,10 +2299,9 @@ static void reada_for_search(struct btrfs_root *root,
}
}
static noinline void reada_for_balance(struct btrfs_root *root,
static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, int level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int slot;
int nritems;
struct extent_buffer *parent;
@ -2340,9 +2340,9 @@ static noinline void reada_for_balance(struct btrfs_root *root,
}
if (block1)
readahead_tree_block(root, block1);
readahead_tree_block(fs_info, block1);
if (block2)
readahead_tree_block(root, block2);
readahead_tree_block(fs_info, block2);
}
@ -2491,12 +2491,12 @@ read_block_for_search(struct btrfs_trans_handle *trans,
free_extent_buffer(tmp);
if (p->reada != READA_NONE)
reada_for_search(root, p, level, slot, key->objectid);
reada_for_search(fs_info, p, level, slot, key->objectid);
btrfs_release_path(p);
ret = -EAGAIN;
tmp = read_tree_block(root, blocknr, 0);
tmp = read_tree_block(fs_info, blocknr, 0);
if (!IS_ERR(tmp)) {
/*
* If the read above didn't mark this buffer up to date,
@ -2542,7 +2542,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
}
btrfs_set_path_blocking(p);
reada_for_balance(root, p, level);
reada_for_balance(fs_info, p, level);
sret = split_node(trans, root, p, level);
btrfs_clear_path_blocking(p, NULL, 0);
@ -2563,7 +2563,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
}
btrfs_set_path_blocking(p);
reada_for_balance(root, p, level);
reada_for_balance(fs_info, p, level);
sret = balance_level(trans, root, p, level);
btrfs_clear_path_blocking(p, NULL, 0);
@ -2905,7 +2905,7 @@ cow_done:
} else {
p->slots[level] = slot;
if (ins_len > 0 &&
btrfs_leaf_free_space(root, b) < ins_len) {
btrfs_leaf_free_space(fs_info, b) < ins_len) {
if (write_lock_level < 1) {
write_lock_level = 1;
btrfs_release_path(p);
@ -3198,10 +3198,10 @@ void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
* error, and > 0 if there was no room in the left hand block.
*/
static int push_node_left(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *dst,
struct btrfs_fs_info *fs_info,
struct extent_buffer *dst,
struct extent_buffer *src, int empty)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int push_items = 0;
int src_nritems;
int dst_nritems;
@ -3273,11 +3273,10 @@ static int push_node_left(struct btrfs_trans_handle *trans,
* this will only push up to 1/2 the contents of the left node over
*/
static int balance_node_right(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct extent_buffer *dst,
struct extent_buffer *src)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int push_items = 0;
int max_push;
int src_nritems;
@ -3407,11 +3406,10 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
* blocknr is the block the key points to.
*/
static void insert_ptr(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_fs_info *fs_info, struct btrfs_path *path,
struct btrfs_disk_key *key, u64 bytenr,
int slot, int level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *lower;
int nritems;
int ret;
@ -3527,7 +3525,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(c);
btrfs_mark_buffer_dirty(split);
insert_ptr(trans, root, path, &disk_key, split->start,
insert_ptr(trans, fs_info, path, &disk_key, split->start,
path->slots[level + 1] + 1, level + 1);
if (path->slots[level] >= mid) {
@ -3575,10 +3573,9 @@ static int leaf_space_used(struct extent_buffer *l, int start, int nr)
* the start of the leaf data. IOW, how much room
* the leaf has left for both items and data
*/
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int nritems = btrfs_header_nritems(leaf);
int ret;
@ -3598,14 +3595,13 @@ noinline int btrfs_leaf_free_space(struct btrfs_root *root,
* right. We'll push up to and including min_slot, but no lower
*/
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int data_size, int empty,
struct extent_buffer *right,
int free_space, u32 left_nritems,
u32 min_slot)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *upper = path->nodes[1];
struct btrfs_map_token token;
@ -3639,7 +3635,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
if (path->slots[0] > i)
break;
if (path->slots[0] == i) {
int space = btrfs_leaf_free_space(root, left);
int space = btrfs_leaf_free_space(fs_info, left);
if (space + push_space * 2 > free_space)
break;
}
@ -3668,10 +3664,10 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
right_nritems = btrfs_header_nritems(right);
push_space = btrfs_item_end_nr(left, left_nritems - push_items);
push_space -= leaf_data_end(root, left);
push_space -= leaf_data_end(fs_info, left);
/* make room in the right data area */
data_end = leaf_data_end(root, right);
data_end = leaf_data_end(fs_info, right);
memmove_extent_buffer(right,
btrfs_leaf_data(right) + data_end - push_space,
btrfs_leaf_data(right) + data_end,
@ -3680,7 +3676,7 @@ static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
/* copy from the left data area */
copy_extent_buffer(right, left, btrfs_leaf_data(right) +
BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
btrfs_leaf_data(left) + leaf_data_end(root, left),
btrfs_leaf_data(left) + leaf_data_end(fs_info, left),
push_space);
memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
@ -3752,6 +3748,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
int min_data_size, int data_size,
int empty, u32 min_slot)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *left = path->nodes[0];
struct extent_buffer *right;
struct extent_buffer *upper;
@ -3770,7 +3767,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
right = read_node_slot(root, upper, slot + 1);
right = read_node_slot(fs_info, upper, slot + 1);
/*
* slot + 1 is not valid or we fail to read the right node,
* no big deal, just return.
@ -3781,7 +3778,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_tree_lock(right);
btrfs_set_lock_blocking(right);
free_space = btrfs_leaf_free_space(root, right);
free_space = btrfs_leaf_free_space(fs_info, right);
if (free_space < data_size)
goto out_unlock;
@ -3791,7 +3788,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
if (ret)
goto out_unlock;
free_space = btrfs_leaf_free_space(root, right);
free_space = btrfs_leaf_free_space(fs_info, right);
if (free_space < data_size)
goto out_unlock;
@ -3812,7 +3809,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
return 0;
}
return __push_leaf_right(trans, root, path, min_data_size, empty,
return __push_leaf_right(trans, fs_info, path, min_data_size, empty,
right, free_space, left_nritems, min_slot);
out_unlock:
btrfs_tree_unlock(right);
@ -3829,13 +3826,12 @@ out_unlock:
* items
*/
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct btrfs_path *path, int data_size,
int empty, struct extent_buffer *left,
int free_space, u32 right_nritems,
u32 max_slot)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_disk_key disk_key;
struct extent_buffer *right = path->nodes[0];
int i;
@ -3863,7 +3859,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
if (path->slots[0] < i)
break;
if (path->slots[0] == i) {
int space = btrfs_leaf_free_space(root, right);
int space = btrfs_leaf_free_space(fs_info, right);
if (space + push_space * 2 > free_space)
break;
}
@ -3896,7 +3892,7 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
btrfs_item_offset_nr(right, push_items - 1);
copy_extent_buffer(left, right, btrfs_leaf_data(left) +
leaf_data_end(root, left) - push_space,
leaf_data_end(fs_info, left) - push_space,
btrfs_leaf_data(right) +
btrfs_item_offset_nr(right, push_items - 1),
push_space);
@ -3923,11 +3919,11 @@ static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
if (push_items < right_nritems) {
push_space = btrfs_item_offset_nr(right, push_items - 1) -
leaf_data_end(root, right);
leaf_data_end(fs_info, right);
memmove_extent_buffer(right, btrfs_leaf_data(right) +
BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
btrfs_leaf_data(right) +
leaf_data_end(root, right), push_space);
leaf_data_end(fs_info, right), push_space);
memmove_extent_buffer(right, btrfs_item_nr_offset(0),
btrfs_item_nr_offset(push_items),
@ -3986,6 +3982,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path, int min_data_size,
int data_size, int empty, u32 max_slot)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *right = path->nodes[0];
struct extent_buffer *left;
int slot;
@ -4005,7 +4002,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
left = read_node_slot(root, path->nodes[1], slot - 1);
left = read_node_slot(fs_info, path->nodes[1], slot - 1);
/*
* slot - 1 is not valid or we fail to read the left node,
* no big deal, just return.
@ -4016,7 +4013,7 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_tree_lock(left);
btrfs_set_lock_blocking(left);
free_space = btrfs_leaf_free_space(root, left);
free_space = btrfs_leaf_free_space(fs_info, left);
if (free_space < data_size) {
ret = 1;
goto out;
@ -4032,13 +4029,13 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
goto out;
}
free_space = btrfs_leaf_free_space(root, left);
free_space = btrfs_leaf_free_space(fs_info, left);
if (free_space < data_size) {
ret = 1;
goto out;
}
return __push_leaf_left(trans, root, path, min_data_size,
return __push_leaf_left(trans, fs_info, path, min_data_size,
empty, left, free_space, right_nritems,
max_slot);
out:
@ -4052,13 +4049,12 @@ out:
* available for the resulting leaf level of the path.
*/
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct extent_buffer *l,
struct extent_buffer *right,
int slot, int mid, int nritems)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int data_copy_size;
int rt_data_off;
int i;
@ -4069,7 +4065,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
nritems = nritems - mid;
btrfs_set_header_nritems(right, nritems);
data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
btrfs_item_nr_offset(mid),
@ -4078,7 +4074,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
copy_extent_buffer(right, l,
btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(fs_info) -
data_copy_size, btrfs_leaf_data(l) +
leaf_data_end(root, l), data_copy_size);
leaf_data_end(fs_info, l), data_copy_size);
rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
@ -4093,7 +4089,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(l, mid);
btrfs_item_key(right, &disk_key, 0);
insert_ptr(trans, root, path, &disk_key, right->start,
insert_ptr(trans, fs_info, path, &disk_key, right->start,
path->slots[1] + 1, 1);
btrfs_mark_buffer_dirty(right);
@ -4129,6 +4125,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
int data_size)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int progress = 0;
int slot;
@ -4137,7 +4134,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
slot = path->slots[0];
if (slot < btrfs_header_nritems(path->nodes[0]))
space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
/*
* try to push all the items after our slot into the
@ -4158,7 +4155,7 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
if (path->slots[0] == 0 || path->slots[0] == nritems)
return 0;
if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
return 0;
/* try to push all the items before our slot into the next leaf */
@ -4211,7 +4208,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
int space_needed = data_size;
if (slot < btrfs_header_nritems(l))
space_needed -= btrfs_leaf_free_space(root, l);
space_needed -= btrfs_leaf_free_space(fs_info, l);
wret = push_leaf_right(trans, root, path, space_needed,
space_needed, 0, 0);
@ -4226,7 +4223,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
l = path->nodes[0];
/* did the pushes work? */
if (btrfs_leaf_free_space(root, l) >= data_size)
if (btrfs_leaf_free_space(fs_info, l) >= data_size)
return 0;
}
@ -4303,8 +4300,8 @@ again:
if (split == 0) {
if (mid <= slot) {
btrfs_set_header_nritems(right, 0);
insert_ptr(trans, root, path, &disk_key, right->start,
path->slots[1] + 1, 1);
insert_ptr(trans, fs_info, path, &disk_key,
right->start, path->slots[1] + 1, 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@ -4312,8 +4309,8 @@ again:
path->slots[1] += 1;
} else {
btrfs_set_header_nritems(right, 0);
insert_ptr(trans, root, path, &disk_key, right->start,
path->slots[1], 1);
insert_ptr(trans, fs_info, path, &disk_key,
right->start, path->slots[1], 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@ -4329,7 +4326,7 @@ again:
return ret;
}
copy_for_split(trans, root, path, l, right, slot, mid, nritems);
copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
if (split == 2) {
BUG_ON(num_doubles != 0);
@ -4342,7 +4339,7 @@ again:
push_for_double:
push_for_double_split(trans, root, path, data_size);
tried_avoid_double = 1;
if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
return 0;
goto again;
}
@ -4351,6 +4348,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, int ins_len)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
@ -4364,7 +4362,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
key.type != BTRFS_EXTENT_CSUM_KEY);
if (btrfs_leaf_free_space(root, leaf) >= ins_len)
if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
return 0;
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
@ -4391,7 +4389,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
goto err;
/* the leaf has changed, it now has room. return now */
if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
goto err;
if (key.type == BTRFS_EXTENT_DATA_KEY) {
@ -4415,7 +4413,7 @@ err:
}
static noinline int split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_key *new_key,
unsigned long split_offset)
@ -4431,7 +4429,7 @@ static noinline int split_item(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
leaf = path->nodes[0];
BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
btrfs_set_path_blocking(path);
@ -4480,7 +4478,7 @@ static noinline int split_item(struct btrfs_trans_handle *trans,
item_size - split_offset);
btrfs_mark_buffer_dirty(leaf);
BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
kfree(buf);
return 0;
}
@ -4512,7 +4510,7 @@ int btrfs_split_item(struct btrfs_trans_handle *trans,
if (ret)
return ret;
ret = split_item(trans, root, path, new_key, split_offset);
ret = split_item(trans, root->fs_info, path, new_key, split_offset);
return ret;
}
@ -4558,10 +4556,9 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
* off the end of the item or if we shift the item to chop bytes off
* the front.
*/
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
u32 new_size, int from_end)
void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u32 new_size, int from_end)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
@ -4583,7 +4580,7 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
return;
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(root, leaf);
data_end = leaf_data_end(fs_info, leaf);
old_data_start = btrfs_item_offset_nr(leaf, slot);
@ -4649,8 +4646,8 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_item_size(leaf, item, new_size);
btrfs_mark_buffer_dirty(leaf);
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
@ -4658,10 +4655,9 @@ void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
/*
* make the item pointed to by the path bigger, data_size is the added size.
*/
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
u32 data_size)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int slot;
struct extent_buffer *leaf;
struct btrfs_item *item;
@ -4677,10 +4673,10 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(root, leaf);
data_end = leaf_data_end(fs_info, leaf);
if (btrfs_leaf_free_space(root, leaf) < data_size) {
btrfs_print_leaf(root, leaf);
if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
btrfs_print_leaf(fs_info, leaf);
BUG();
}
slot = path->slots[0];
@ -4688,7 +4684,7 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
BUG_ON(slot < 0);
if (slot >= nritems) {
btrfs_print_leaf(root, leaf);
btrfs_print_leaf(fs_info, leaf);
btrfs_crit(fs_info, "slot %d too large, nritems %d",
slot, nritems);
BUG_ON(1);
@ -4718,8 +4714,8 @@ void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_item_size(leaf, item, old_size + data_size);
btrfs_mark_buffer_dirty(leaf);
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
@ -4755,12 +4751,12 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
slot = path->slots[0];
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(root, leaf);
data_end = leaf_data_end(fs_info, leaf);
if (btrfs_leaf_free_space(root, leaf) < total_size) {
btrfs_print_leaf(root, leaf);
if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
btrfs_print_leaf(fs_info, leaf);
btrfs_crit(fs_info, "not enough freespace need %u have %d",
total_size, btrfs_leaf_free_space(root, leaf));
total_size, btrfs_leaf_free_space(fs_info, leaf));
BUG();
}
@ -4768,7 +4764,7 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
unsigned int old_data = btrfs_item_end_nr(leaf, slot);
if (old_data < data_end) {
btrfs_print_leaf(root, leaf);
btrfs_print_leaf(fs_info, leaf);
btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
slot, old_data, data_end);
BUG_ON(1);
@ -4811,8 +4807,8 @@ void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
btrfs_set_header_nritems(leaf, nritems + nr);
btrfs_mark_buffer_dirty(leaf);
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
btrfs_print_leaf(fs_info, leaf);
BUG();
}
}
@ -4982,7 +4978,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
nritems = btrfs_header_nritems(leaf);
if (slot + nr != nritems) {
int data_end = leaf_data_end(root, leaf);
int data_end = leaf_data_end(fs_info, leaf);
memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
data_end + dsize,
@ -5145,6 +5141,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_path *path,
u64 min_trans)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *cur;
struct btrfs_key found_key;
int slot;
@ -5221,7 +5218,7 @@ find_next_key:
goto out;
}
btrfs_set_path_blocking(path);
cur = read_node_slot(root, cur, slot);
cur = read_node_slot(fs_info, cur, slot);
if (IS_ERR(cur)) {
ret = PTR_ERR(cur);
goto out;
@ -5244,14 +5241,14 @@ out:
return ret;
}
static int tree_move_down(struct btrfs_root *root,
static int tree_move_down(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level)
{
struct extent_buffer *eb;
BUG_ON(*level == 0);
eb = read_node_slot(root, path->nodes[*level], path->slots[*level]);
eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
if (IS_ERR(eb))
return PTR_ERR(eb);
@ -5261,7 +5258,7 @@ static int tree_move_down(struct btrfs_root *root,
return 0;
}
static int tree_move_next_or_upnext(struct btrfs_root *root,
static int tree_move_next_or_upnext(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level)
{
@ -5292,7 +5289,7 @@ static int tree_move_next_or_upnext(struct btrfs_root *root,
* Returns 1 if it had to move up and next. 0 is returned if it moved only next
* or down.
*/
static int tree_advance(struct btrfs_root *root,
static int tree_advance(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
int *level, int root_level,
int allow_down,
@ -5301,9 +5298,10 @@ static int tree_advance(struct btrfs_root *root,
int ret;
if (*level == 0 || !allow_down) {
ret = tree_move_next_or_upnext(root, path, level, root_level);
ret = tree_move_next_or_upnext(fs_info, path, level,
root_level);
} else {
ret = tree_move_down(root, path, level, root_level);
ret = tree_move_down(fs_info, path, level, root_level);
}
if (ret >= 0) {
if (*level == 0)
@ -5316,8 +5314,7 @@ static int tree_advance(struct btrfs_root *root,
return ret;
}
static int tree_compare_item(struct btrfs_root *left_root,
struct btrfs_path *left_path,
static int tree_compare_item(struct btrfs_path *left_path,
struct btrfs_path *right_path,
char *tmp_buf)
{
@ -5474,7 +5471,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
while (1) {
if (advance_left && !left_end_reached) {
ret = tree_advance(left_root, left_path, &left_level,
ret = tree_advance(fs_info, left_path, &left_level,
left_root_level,
advance_left != ADVANCE_ONLY_NEXT,
&left_key);
@ -5485,7 +5482,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
advance_left = 0;
}
if (advance_right && !right_end_reached) {
ret = tree_advance(right_root, right_path, &right_level,
ret = tree_advance(fs_info, right_path, &right_level,
right_root_level,
advance_right != ADVANCE_ONLY_NEXT,
&right_key);
@ -5549,8 +5546,8 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
enum btrfs_compare_tree_result result;
WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
ret = tree_compare_item(left_root, left_path,
right_path, tmp_buf);
ret = tree_compare_item(left_path, right_path,
tmp_buf);
if (ret)
result = BTRFS_COMPARE_TREE_CHANGED;
else

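The ctree.c pattern throughout the hunks above is consistent: internal helpers such as read_node_slot(), btrfs_leaf_free_space() and insert_ptr() now take fs_info, while the exported functions that still accept a root hoist a single struct btrfs_fs_info *fs_info = root->fs_info; local to the top and pass it down. A small stand-alone sketch of that hoisting, with dummy types in place of the real ones:

#include <stdio.h>

/* Dummy stand-ins for the kernel structures. */
struct btrfs_fs_info { unsigned int leaf_data_size; };
struct btrfs_root { struct btrfs_fs_info *fs_info; };
struct extent_buffer { unsigned int used; };

/* Helper converted to take fs_info (compare btrfs_leaf_free_space()). */
static int leaf_free_space(struct btrfs_fs_info *fs_info,
                           struct extent_buffer *leaf)
{
        return (int)(fs_info->leaf_data_size - leaf->used);
}

/* An exported-style function keeps its root argument but grabs fs_info
 * once and hands it to every helper, as split_leaf() and the push_leaf
 * routines now do. */
static int has_room(struct btrfs_root *root, struct extent_buffer *leaf,
                    int data_size)
{
        struct btrfs_fs_info *fs_info = root->fs_info;

        return leaf_free_space(fs_info, leaf) >= data_size;
}

int main(void)
{
        struct btrfs_fs_info fsi = { .leaf_data_size = 16256 };
        struct btrfs_root root = { .fs_info = &fsi };
        struct extent_buffer leaf = { .used = 1000 };

        printf("%d\n", has_room(&root, &leaf, 512));
        return 0;
}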
fs/btrfs/ctree.h

@ -1349,10 +1349,9 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
#ifdef CONFIG_BTRFS_DEBUG
static inline int
btrfs_should_fragment_free_space(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
btrfs_should_fragment_free_space(struct btrfs_block_group_cache *block_group)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_info *fs_info = block_group->fs_info;
return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
@ -2311,10 +2310,9 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
* this returns the address of the start of the last item,
* which is the stop of the leaf data stack
*/
static inline unsigned int leaf_data_end(struct btrfs_root *root,
static inline unsigned int leaf_data_end(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u32 nr = btrfs_header_nritems(leaf);
if (nr == 0)
@ -2536,7 +2534,7 @@ static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
/* extent-tree.c */
u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes);
u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
@ -2555,9 +2553,9 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
}
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@ -2566,18 +2564,18 @@ void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_root *root,
struct btrfs_fs_info *fs_info, unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait);
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 offset, int metadata, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num, int reserved);
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes);
int btrfs_exclude_logged_extents(struct btrfs_root *root,
int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@ -2598,12 +2596,11 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
struct extent_buffer *buf,
u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 root_objectid, u64 owner,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
@ -2614,39 +2611,39 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, int full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 flags,
int level, int is_data);
int btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset);
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
int delalloc);
int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len, int delalloc);
int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len);
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
struct btrfs_fs_info *fs_info);
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytes_used,
struct btrfs_fs_info *fs_info, u64 bytes_used,
u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
@ -2659,7 +2656,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
@ -2689,7 +2686,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
u64 len);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode);
@ -2698,7 +2695,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
int nitems,
u64 *qgroup_reserved, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_root *root,
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
@ -2706,16 +2703,15 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type);
void btrfs_free_block_rsv(struct btrfs_root *root,
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 min_reserved,
enum btrfs_reserve_flush_enum flush);
@ -2725,22 +2721,21 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *dest, u64 num_bytes,
int min_factor);
void btrfs_block_rsv_release(struct btrfs_root *root,
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
int btrfs_inc_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type);
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
struct btrfs_fs_info *fs_info, u64 type);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
@ -2750,8 +2745,7 @@ int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
void check_system_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const u64 type);
struct btrfs_fs_info *fs_info, const u64 type);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_fs_info *info, u64 start, u64 end);
@ -2801,10 +2795,10 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf);
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
u32 data_size);
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
u32 new_size, int from_end);
void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@ -2880,7 +2874,8 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
int update_ref, int for_reloc);
@ -2906,11 +2901,9 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
* anything except sleeping. This function is used to check the status of
* the fs.
*/
static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
return (fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info));
return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
}
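btrfs_need_cleaner_sleep() answering from fs_info alone matches how the cleaner thread reasons about the whole filesystem rather than any one root. A hedged sketch of a check using the same logic follows; the flags and the surrounding flow are illustrative stand-ins, not the kernel's cleaner_kthread().

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins: two plain flags replace the real superblock flags
 * and fs_info state. */
struct btrfs_fs_info {
        bool sb_rdonly;
        bool closing;
};

static int btrfs_fs_closing_stub(struct btrfs_fs_info *fs_info)
{
        return fs_info->closing;
}

/* Same shape as the new inline: readable entirely from fs_info. */
static int need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
        return fs_info->sb_rdonly || btrfs_fs_closing_stub(fs_info);
}

int main(void)
{
        struct btrfs_fs_info fs_info = { .sb_rdonly = false, .closing = false };

        /* Do background work only while writable and not shutting down. */
        if (!need_cleaner_sleep(&fs_info))
                printf("run background cleanup\n");
        return 0;
}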
static inline void free_fs_info(struct btrfs_fs_info *fs_info)
@ -3013,10 +3006,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
int verify_dir_item(struct btrfs_root *root,
int verify_dir_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name,
int name_len);
@ -3061,10 +3054,9 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 logical_offset);
int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 pos,
@ -3078,8 +3070,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig);
int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
u64 file_start, int contig);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit);
void btrfs_extent_item_to_extent_map(struct inode *inode,
@ -3182,7 +3174,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
@ -3236,9 +3228,8 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
struct page **pages, size_t num_pages,
loff_t pos, size_t write_bytes,
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
@ -3261,7 +3252,7 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options,
int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
@ -3637,12 +3628,12 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
u64 end, struct btrfs_scrub_progress *progress,
int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_root *root);
void btrfs_scrub_continue(struct btrfs_root *root);
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress);
/* dev-replace.c */

fs/btrfs/delayed-inode.c

@ -529,10 +529,9 @@ static struct btrfs_delayed_item *__btrfs_next_delayed_item(
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_item *item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *src_rsv;
struct btrfs_block_rsv *dst_rsv;
u64 num_bytes;
@ -556,10 +555,9 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_item *item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *rsv;
if (!item->bytes_reserved)
@ -569,7 +567,7 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid, item->bytes_reserved,
0);
btrfs_block_rsv_release(root, rsv,
btrfs_block_rsv_release(fs_info, rsv,
item->bytes_reserved);
}
@ -669,16 +667,15 @@ static int btrfs_delayed_inode_reserve_metadata(
if (release) {
trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), num_bytes, 0);
btrfs_block_rsv_release(root, src_rsv, num_bytes);
btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
}
return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_node *node)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *rsv;
if (!node->bytes_reserved)
@ -687,7 +684,7 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
rsv = &fs_info->delayed_block_rsv;
trace_btrfs_space_reservation(fs_info, "delayed_inode",
node->inode_id, node->bytes_reserved, 0);
btrfs_block_rsv_release(root, rsv,
btrfs_block_rsv_release(fs_info, rsv,
node->bytes_reserved);
node->bytes_reserved = 0;
}
@ -700,6 +697,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
int free_space;
int total_data_size = 0, total_size = 0;
@ -716,7 +714,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
BUG_ON(!path->nodes[0]);
leaf = path->nodes[0];
free_space = btrfs_leaf_free_space(root, leaf);
free_space = btrfs_leaf_free_space(fs_info, leaf);
INIT_LIST_HEAD(&head);
next = item;
@ -789,7 +787,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
curr->data_len);
slot++;
btrfs_delayed_item_release_metadata(root, curr);
btrfs_delayed_item_release_metadata(fs_info, curr);
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
@ -811,6 +809,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_item *delayed_item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
char *ptr;
int ret;
@ -828,7 +827,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
delayed_item->data_len);
btrfs_mark_buffer_dirty(leaf);
btrfs_delayed_item_release_metadata(root, delayed_item);
btrfs_delayed_item_release_metadata(fs_info, delayed_item);
return 0;
}
@ -880,6 +879,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
struct extent_buffer *leaf;
struct btrfs_key key;
@ -929,7 +929,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
goto out;
list_for_each_entry_safe(curr, next, &head, tree_list) {
btrfs_delayed_item_release_metadata(root, curr);
btrfs_delayed_item_release_metadata(fs_info, curr);
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
}
@ -1015,6 +1015,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_node *node)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
@ -1071,7 +1072,7 @@ out:
no_iref:
btrfs_release_path(path);
err_out:
btrfs_delayed_inode_release_metadata(root, node);
btrfs_delayed_inode_release_metadata(fs_info, node);
btrfs_release_delayed_inode(node);
return ret;
@ -1136,9 +1137,8 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
* outstanding delayed items cleaned up.
*/
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr)
struct btrfs_fs_info *fs_info, int nr)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_path *path;
@ -1184,15 +1184,15 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
struct btrfs_fs_info *fs_info)
{
return __btrfs_run_delayed_items(trans, root, -1);
return __btrfs_run_delayed_items(trans, fs_info, -1);
}
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr)
struct btrfs_fs_info *fs_info, int nr)
{
return __btrfs_run_delayed_items(trans, root, nr);
return __btrfs_run_delayed_items(trans, fs_info, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
@ -1235,6 +1235,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
struct btrfs_path *path;
@ -1266,7 +1267,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
path->leave_spinning = 1;
block_rsv = trans->block_rsv;
trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
trans->block_rsv = &fs_info->delayed_block_rsv;
mutex_lock(&delayed_node->mutex);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
@ -1280,7 +1281,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
trans->block_rsv = block_rsv;
trans_out:
btrfs_end_transaction(trans, delayed_node->root);
btrfs_btree_balance_dirty(delayed_node->root);
btrfs_btree_balance_dirty(fs_info);
out:
btrfs_release_delayed_node(delayed_node);
@ -1345,7 +1346,7 @@ again:
trans->block_rsv = block_rsv;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty_nodelay(root);
btrfs_btree_balance_dirty_nodelay(root->fs_info);
release_path:
btrfs_release_path(path);
@ -1402,12 +1403,9 @@ static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
return 0;
}
void btrfs_balance_delayed_items(struct btrfs_root *root)
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
struct btrfs_delayed_root *delayed_root;
struct btrfs_fs_info *fs_info = root->fs_info;
delayed_root = fs_info->delayed_root;
struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
return;
@ -1432,8 +1430,9 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
int name_len, struct inode *dir,
struct btrfs_fs_info *fs_info,
const char *name, int name_len,
struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index)
{
@ -1464,7 +1463,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
btrfs_set_stack_dir_type(dir_item, type);
memcpy((char *)(dir_item + 1), name, name_len);
ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
/*
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible
@ -1475,7 +1474,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (unlikely(ret)) {
btrfs_err(root->fs_info,
btrfs_err(fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
name_len, name, delayed_node->root->objectid,
delayed_node->inode_id, ret);
@ -1488,7 +1487,7 @@ release_node:
return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_node *node,
struct btrfs_key *key)
{
@ -1501,15 +1500,15 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
return 1;
}
btrfs_delayed_item_release_metadata(root, item);
btrfs_delayed_item_release_metadata(fs_info, item);
btrfs_release_delayed_item(item);
mutex_unlock(&node->mutex);
return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *dir,
u64 index)
struct btrfs_fs_info *fs_info,
struct inode *dir, u64 index)
{
struct btrfs_delayed_node *node;
struct btrfs_delayed_item *item;
@ -1524,7 +1523,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
item_key.type = BTRFS_DIR_INDEX_KEY;
item_key.offset = index;
ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
if (!ret)
goto end;
@ -1536,7 +1535,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
item->key = item_key;
ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
/*
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible.
@ -1546,7 +1545,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
if (unlikely(ret)) {
btrfs_err(root->fs_info,
btrfs_err(fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
BUG();
@ -1902,12 +1901,13 @@ release_node:
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
struct btrfs_root *root = delayed_node->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr_item, *prev_item;
mutex_lock(&delayed_node->mutex);
curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
while (curr_item) {
btrfs_delayed_item_release_metadata(root, curr_item);
btrfs_delayed_item_release_metadata(fs_info, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
@ -1915,7 +1915,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
while (curr_item) {
btrfs_delayed_item_release_metadata(root, curr_item);
btrfs_delayed_item_release_metadata(fs_info, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
@ -1925,7 +1925,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
btrfs_release_delayed_iref(delayed_node);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
btrfs_delayed_inode_release_metadata(root, delayed_node);
btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
btrfs_release_delayed_inode(delayed_node);
}
mutex_unlock(&delayed_node->mutex);
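
The hunks above show the recurring shape of this conversion in the delayed-inode code: helpers such as btrfs_delayed_item_release_metadata() dereferenced their root argument only to load root->fs_info, so the local "fs_info = root->fs_info" line goes away, the parameter type changes, and callers that still hold a root simply pass root->fs_info. The following is a minimal standalone sketch of that pattern; the structures are simplified mock-ups and the function names are hypothetical, not kernel code.

#include <stdio.h>

/* mock types standing in for the kernel structures */
struct btrfs_fs_info { unsigned int nodesize; };
struct btrfs_root { struct btrfs_fs_info *fs_info; };

/* old shape: root is only used to reach root->fs_info */
static void release_metadata_old(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	printf("nodesize=%u\n", fs_info->nodesize);
}

/* new shape: take the fs_info directly */
static void release_metadata_new(struct btrfs_fs_info *fs_info)
{
	printf("nodesize=%u\n", fs_info->nodesize);
}

int main(void)
{
	struct btrfs_fs_info fs_info = { .nodesize = 16384 };
	struct btrfs_root root = { .fs_info = &fs_info };

	release_metadata_old(&root);		/* before: caller must hold a root */
	release_metadata_new(root.fs_info);	/* after: any fs_info holder can call */
	return 0;
}

Both calls print the same value; the point of the new signature is only that a caller no longer needs a root at all.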

View file

@ -99,23 +99,24 @@ static inline void btrfs_init_delayed_root(
}
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
int name_len, struct inode *dir,
struct btrfs_fs_info *fs_info,
const char *name, int name_len,
struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *dir,
u64 index);
struct btrfs_fs_info *fs_info,
struct inode *dir, u64 index);
int btrfs_inode_delayed_dir_index_count(struct inode *inode);
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr);
struct btrfs_fs_info *fs_info, int nr);
void btrfs_balance_delayed_items(struct btrfs_root *root);
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info);
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct inode *inode);

View file

@ -304,11 +304,11 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
dev_replace->cursor_left_last_write_of_item;
}
int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
u64 srcdevid, char *srcdev_name, int read_src)
{
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_trans_handle *trans;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int ret;
struct btrfs_device *tgt_device = NULL;
@ -316,14 +316,14 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
/* the disk copy procedure reuses the scrub code */
mutex_lock(&fs_info->volume_mutex);
ret = btrfs_find_device_by_devspec(root, srcdevid,
ret = btrfs_find_device_by_devspec(fs_info, srcdevid,
srcdev_name, &src_device);
if (ret) {
mutex_unlock(&fs_info->volume_mutex);
return ret;
}
ret = btrfs_init_dev_replace_tgtdev(root, tgtdev_name,
ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
src_device, &tgt_device);
mutex_unlock(&fs_info->volume_mutex);
if (ret)
@ -422,7 +422,7 @@ leave:
return ret;
}
int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
int ret;
@ -439,7 +439,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
args->start.tgtdev_name[0] == '\0')
return -EINVAL;
ret = btrfs_dev_replace_start(root, args->start.tgtdev_name,
ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
args->start.srcdev_name,
args->start.cont_reading_from_srcdev_mode);

View file

@ -25,9 +25,9 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
u64 srcdevid, char *srcdev_name, int read_src);
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);

View file

@ -38,6 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
const char *name,
int name_len)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *ptr;
struct btrfs_item *item;
@ -46,10 +47,10 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
if (ret == -EEXIST) {
struct btrfs_dir_item *di;
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
btrfs_extend_item(root, path, data_size);
btrfs_extend_item(fs_info, path, data_size);
} else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
@ -172,8 +173,9 @@ second_insert:
}
btrfs_release_path(path);
ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
&disk_key, type, index);
ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
name_len, dir, &disk_key, type,
index);
out_free:
btrfs_free_path(path);
if (ret)
@ -210,7 +212,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
@ -246,7 +248,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
}
/* we found an item, look for our name in the item */
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
if (di) {
/* our exact name was found */
ret = -EEXIST;
@ -301,7 +303,7 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
return ERR_PTR(ret);
if (ret > 0)
return ERR_PTR(-ENOENT);
return btrfs_match_dir_item_name(root, path, name, name_len);
return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
struct btrfs_dir_item *
@ -342,7 +344,8 @@ btrfs_search_dir_index_item(struct btrfs_root *root,
if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY)
break;
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(root->fs_info, path,
name, name_len);
if (di)
return di;
@ -371,7 +374,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
/*
@ -379,7 +382,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
* this walks through all the entries in a dir item and finds one
* for a specific name.
*/
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name, int name_len)
{
@ -392,7 +395,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
if (verify_dir_item(root, leaf, dir_item))
if (verify_dir_item(fs_info, leaf, dir_item))
return NULL;
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
@ -442,16 +445,16 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
btrfs_truncate_item(root, path, item_len - sub_item_len, 1);
btrfs_truncate_item(root->fs_info, path,
item_len - sub_item_len, 1);
}
return ret;
}
int verify_dir_item(struct btrfs_root *root,
int verify_dir_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u16 namelen = BTRFS_NAME_LEN;
u8 type = btrfs_dir_type(leaf, dir_item);

View file

@ -68,15 +68,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages,
int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
struct extent_io_tree *pinned_extents);
static int btrfs_cleanup_transaction(struct btrfs_root *root);
static void btrfs_error_commit_super(struct btrfs_root *root);
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);
/*
* btrfs_end_io_wq structs are used to do processing in task context when an IO
@ -440,11 +440,10 @@ static int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
* helper to read a given tree block, doing retries as required when
* the checksums don't match and we have alternate mirrors to try.
*/
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb,
u64 parent_transid)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *io_tree;
int failed = 0;
int ret;
@ -492,7 +491,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
}
if (failed && !ret && failed_mirror)
repair_eb_io_failure(root, eb, failed_mirror);
repair_eb_io_failure(fs_info, eb, failed_mirror);
return ret;
}
@ -983,7 +982,7 @@ static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
ret = btrfs_map_bio(btrfs_sb(inode->i_sb), bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@ -1019,12 +1018,12 @@ static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
} else {
/*
* kthread helpers are used to submit writes so that
@ -1148,12 +1147,12 @@ static const struct address_space_operations btree_aops = {
.set_page_dirty = btree_set_page_dirty,
};
void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr)
{
struct extent_buffer *buf = NULL;
struct inode *btree_inode = root->fs_info->btree_inode;
struct inode *btree_inode = fs_info->btree_inode;
buf = btrfs_find_create_tree_block(root, bytenr);
buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return;
read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
@ -1161,15 +1160,15 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
free_extent_buffer(buf);
}
int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
int mirror_num, struct extent_buffer **eb)
{
struct extent_buffer *buf = NULL;
struct inode *btree_inode = root->fs_info->btree_inode;
struct inode *btree_inode = fs_info->btree_inode;
struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
int ret;
buf = btrfs_find_create_tree_block(root, bytenr);
buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return 0;
@ -1193,11 +1192,10 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
return 0;
}
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr)
struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
u64 bytenr)
{
struct btrfs_fs_info *fs_info = root->fs_info;
if (btrfs_is_testing(fs_info))
return alloc_test_extent_buffer(fs_info, bytenr);
return alloc_extent_buffer(fs_info, bytenr);
@ -1216,17 +1214,17 @@ int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
buf->start, buf->start + buf->len - 1);
}
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 parent_transid)
{
struct extent_buffer *buf = NULL;
int ret;
buf = btrfs_find_create_tree_block(root, bytenr);
buf = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(buf))
return buf;
ret = btree_read_extent_buffer_pages(root, buf, parent_transid);
ret = btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
if (ret) {
free_extent_buffer(buf);
return ERR_PTR(ret);
@ -1578,7 +1576,8 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
}
generation = btrfs_root_generation(&root->root_item);
root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
root->node = read_tree_block(fs_info,
btrfs_root_bytenr(&root->root_item),
generation);
if (IS_ERR(root->node)) {
ret = PTR_ERR(root->node);
@ -1841,7 +1840,7 @@ static int cleaner_kthread(void *arg)
again = 0;
/* Make the cleaner go to sleep early. */
if (btrfs_need_cleaner_sleep(root))
if (btrfs_need_cleaner_sleep(fs_info))
goto sleep;
/*
@ -1858,13 +1857,13 @@ static int cleaner_kthread(void *arg)
* Avoid the problem that we change the status of the fs
* during the above check and trylock.
*/
if (btrfs_need_cleaner_sleep(root)) {
if (btrfs_need_cleaner_sleep(fs_info)) {
mutex_unlock(&fs_info->cleaner_mutex);
goto sleep;
}
mutex_lock(&fs_info->cleaner_delayed_iput_mutex);
btrfs_run_delayed_iputs(root);
btrfs_run_delayed_iputs(fs_info);
mutex_unlock(&fs_info->cleaner_delayed_iput_mutex);
again = btrfs_clean_one_deleted_snapshot(root);
@ -1976,7 +1975,7 @@ sleep:
if (unlikely(test_bit(BTRFS_FS_STATE_ERROR,
&fs_info->fs_state)))
btrfs_cleanup_transaction(root);
btrfs_cleanup_transaction(fs_info);
set_current_state(TASK_INTERRUPTIBLE);
if (!kthread_should_stop() &&
(!btrfs_transaction_blocked(fs_info) ||
@ -2266,8 +2265,7 @@ void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_free_log_root_tree(NULL, fs_info);
btrfs_destroy_pinned_extent(fs_info->tree_root,
fs_info->pinned_extents);
btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
}
}
@ -2295,30 +2293,29 @@ static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
static void btrfs_init_btree_inode(struct btrfs_fs_info *fs_info)
{
fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
set_nlink(fs_info->btree_inode, 1);
struct inode *inode = fs_info->btree_inode;
inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
set_nlink(inode, 1);
/*
* we set the i_size on the btree inode to the max possible int.
* the real end of the address space is determined by all of
* the devices in the system
*/
fs_info->btree_inode->i_size = OFFSET_MAX;
fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
inode->i_size = OFFSET_MAX;
inode->i_mapping->a_ops = &btree_aops;
RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
fs_info->btree_inode->i_mapping);
BTRFS_I(fs_info->btree_inode)->io_tree.track_uptodate = 0;
extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
extent_io_tree_init(&BTRFS_I(inode)->io_tree, inode->i_mapping);
BTRFS_I(inode)->io_tree.track_uptodate = 0;
extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
BTRFS_I(inode)->io_tree.ops = &btree_extent_io_ops;
BTRFS_I(fs_info->btree_inode)->root = fs_info->tree_root;
memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
sizeof(struct btrfs_key));
set_bit(BTRFS_INODE_DUMMY,
&BTRFS_I(fs_info->btree_inode)->runtime_flags);
btrfs_insert_inode_hash(fs_info->btree_inode);
BTRFS_I(inode)->root = fs_info->tree_root;
memset(&BTRFS_I(inode)->location, 0, sizeof(struct btrfs_key));
set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
btrfs_insert_inode_hash(inode);
}
static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
@ -2439,7 +2436,6 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices)
{
int ret;
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_root *log_tree_root;
struct btrfs_super_block *disk_super = fs_info->super_copy;
u64 bytenr = btrfs_super_log_root(disk_super);
@ -2455,8 +2451,8 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
__setup_root(log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
log_tree_root->node = read_tree_block(tree_root, bytenr,
fs_info->generation + 1);
log_tree_root->node = read_tree_block(fs_info, bytenr,
fs_info->generation + 1);
if (IS_ERR(log_tree_root->node)) {
btrfs_warn(fs_info, "failed to read log tree");
ret = PTR_ERR(log_tree_root->node);
@ -2819,7 +2815,7 @@ int open_ctree(struct super_block *sb,
*/
fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
ret = btrfs_parse_options(tree_root, options, sb->s_flags);
ret = btrfs_parse_options(fs_info, options, sb->s_flags);
if (ret) {
err = ret;
goto fail_alloc;
@ -2920,7 +2916,7 @@ int open_ctree(struct super_block *sb,
__setup_root(chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
chunk_root->node = read_tree_block(chunk_root,
chunk_root->node = read_tree_block(fs_info,
btrfs_super_chunk_root(disk_super),
generation);
if (IS_ERR(chunk_root->node) ||
@ -2957,7 +2953,7 @@ int open_ctree(struct super_block *sb,
retry_root_backup:
generation = btrfs_super_generation(disk_super);
tree_root->node = read_tree_block(tree_root,
tree_root->node = read_tree_block(fs_info,
btrfs_super_root(disk_super),
generation);
if (IS_ERR(tree_root->node) ||
@ -3081,7 +3077,7 @@ retry_root_backup:
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(fs_info, CHECK_INTEGRITY)) {
ret = btrfsic_mount(tree_root, fs_devices,
ret = btrfsic_mount(fs_info, fs_devices,
btrfs_test_opt(fs_info,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
1 : 0,
@ -3233,7 +3229,7 @@ fail_qgroup:
btrfs_free_qgroup_config(fs_info);
fail_trans_kthread:
kthread_stop(fs_info->transaction_kthread);
btrfs_cleanup_transaction(fs_info->tree_root);
btrfs_cleanup_transaction(fs_info);
btrfs_free_fs_roots(fs_info);
fail_cleaner:
kthread_stop(fs_info->cleaner_kthread);
@ -3685,9 +3681,8 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
return num_tolerated_disk_barrier_failures;
}
static int write_all_supers(struct btrfs_root *root, int max_mirrors)
static int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *head;
struct btrfs_device *dev;
struct btrfs_super_block *sb;
@ -3781,9 +3776,9 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
}
int write_ctree_super(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int max_mirrors)
struct btrfs_fs_info *fs_info, int max_mirrors)
{
return write_all_supers(root, max_mirrors);
return write_all_supers(fs_info, max_mirrors);
}
/* Drop a fs root from the radix tree and free it. */
@ -3819,7 +3814,7 @@ static void free_fs_root(struct btrfs_root *root)
{
iput(root->ino_cache_inode);
WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
btrfs_free_block_rsv(root, root->orphan_block_rsv);
btrfs_free_block_rsv(root->fs_info, root->orphan_block_rsv);
root->orphan_block_rsv = NULL;
if (root->anon_dev)
free_anon_bdev(root->anon_dev);
@ -3895,7 +3890,7 @@ int btrfs_commit_super(struct btrfs_fs_info *fs_info)
struct btrfs_trans_handle *trans;
mutex_lock(&fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(root);
btrfs_run_delayed_iputs(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
wake_up_process(fs_info->cleaner_kthread);
@ -3954,7 +3949,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
}
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
btrfs_error_commit_super(root);
btrfs_error_commit_super(fs_info);
kthread_stop(fs_info->transaction_kthread);
kthread_stop(fs_info->cleaner_kthread);
@ -3991,7 +3986,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_test_opt(fs_info, CHECK_INTEGRITY))
btrfsic_unmount(root, fs_info->fs_devices);
btrfsic_unmount(fs_info->fs_devices);
#endif
btrfs_close_devices(fs_info->fs_devices);
@ -4066,16 +4061,15 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
btrfs_print_leaf(root, buf);
btrfs_print_leaf(fs_info, buf);
ASSERT(0);
}
#endif
}
static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
int flush_delayed)
{
struct btrfs_fs_info *fs_info = root->fs_info;
/*
* looks as though older kernels can get into trouble with
* this code, they end up stuck in balance_dirty_pages forever
@ -4086,7 +4080,7 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
return;
if (flush_delayed)
btrfs_balance_delayed_items(root);
btrfs_balance_delayed_items(fs_info);
ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
BTRFS_DIRTY_METADATA_THRESH);
@ -4095,20 +4089,22 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
}
}
void btrfs_btree_balance_dirty(struct btrfs_root *root)
void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
{
__btrfs_btree_balance_dirty(root, 1);
__btrfs_btree_balance_dirty(fs_info, 1);
}
void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root)
void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
{
__btrfs_btree_balance_dirty(root, 0);
__btrfs_btree_balance_dirty(fs_info, 0);
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
return btree_read_extent_buffer_pages(root, buf, parent_transid);
struct btrfs_fs_info *fs_info = root->fs_info;
return btree_read_extent_buffer_pages(fs_info, buf, parent_transid);
}
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
@ -4259,19 +4255,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
return ret;
}
static void btrfs_error_commit_super(struct btrfs_root *root)
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
mutex_lock(&fs_info->cleaner_mutex);
btrfs_run_delayed_iputs(root);
btrfs_run_delayed_iputs(fs_info);
mutex_unlock(&fs_info->cleaner_mutex);
down_write(&fs_info->cleanup_work_sem);
up_write(&fs_info->cleanup_work_sem);
/* cleanup FS via transaction */
btrfs_cleanup_transaction(root);
btrfs_cleanup_transaction(fs_info);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
@ -4314,9 +4308,8 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_root *root)
struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct rb_node *node;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
@ -4372,7 +4365,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
mutex_unlock(&head->mutex);
if (pin_bytes)
btrfs_pin_extent(root, head->node.bytenr,
btrfs_pin_extent(fs_info, head->node.bytenr,
head->node.num_bytes, 1);
btrfs_put_delayed_ref(&head->node);
cond_resched();
@ -4436,11 +4429,10 @@ static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
spin_unlock(&fs_info->delalloc_root_lock);
}
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
static int btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages,
int mark)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct extent_buffer *eb;
u64 start = 0;
@ -4470,10 +4462,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
return ret;
}
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
struct extent_io_tree *pinned_extents)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *unpin;
u64 start;
u64 end;
@ -4489,7 +4480,7 @@ again:
break;
clear_extent_dirty(unpin, start, end);
btrfs_error_unpin_extent_range(root, start, end);
btrfs_error_unpin_extent_range(fs_info, start, end);
cond_resched();
}
@ -4520,9 +4511,8 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
}
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
struct btrfs_root *root)
struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
spin_lock(&cur_trans->dirty_bgs_lock);
@ -4572,14 +4562,13 @@ void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
}
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
struct btrfs_root *root)
struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
btrfs_cleanup_dirty_bgs(cur_trans, root);
btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
btrfs_destroy_delayed_refs(cur_trans, root);
btrfs_destroy_delayed_refs(cur_trans, fs_info);
cur_trans->state = TRANS_STATE_COMMIT_START;
wake_up(&fs_info->transaction_blocked_wait);
@ -4590,9 +4579,9 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
btrfs_destroy_delayed_inodes(fs_info);
btrfs_assert_delayed_root_empty(fs_info);
btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
EXTENT_DIRTY);
btrfs_destroy_pinned_extent(root,
btrfs_destroy_pinned_extent(fs_info,
fs_info->pinned_extents);
cur_trans->state =TRANS_STATE_COMPLETED;
@ -4604,9 +4593,8 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
*/
}
static int btrfs_cleanup_transaction(struct btrfs_root *root)
static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *t;
mutex_lock(&fs_info->transaction_kthread_mutex);
@ -4618,7 +4606,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
if (t->state >= TRANS_STATE_COMMIT_START) {
atomic_inc(&t->use_count);
spin_unlock(&fs_info->trans_lock);
btrfs_wait_for_commit(root, t->transid);
btrfs_wait_for_commit(fs_info, t->transid);
btrfs_put_transaction(t);
spin_lock(&fs_info->trans_lock);
continue;
@ -4635,7 +4623,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
} else {
spin_unlock(&fs_info->trans_lock);
}
btrfs_cleanup_one_transaction(t, root);
btrfs_cleanup_one_transaction(t, fs_info);
spin_lock(&fs_info->trans_lock);
if (t == fs_info->running_transaction)
@ -4644,14 +4632,14 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
spin_unlock(&fs_info->trans_lock);
btrfs_put_transaction(t);
trace_btrfs_transaction_commit(root);
trace_btrfs_transaction_commit(fs_info->tree_root);
spin_lock(&fs_info->trans_lock);
}
spin_unlock(&fs_info->trans_lock);
btrfs_destroy_all_ordered_extents(fs_info);
btrfs_destroy_delayed_inodes(fs_info);
btrfs_assert_delayed_root_empty(fs_info);
btrfs_destroy_pinned_extent(root, fs_info->pinned_extents);
btrfs_destroy_pinned_extent(fs_info, fs_info->pinned_extents);
btrfs_destroy_all_delalloc_inodes(fs_info);
mutex_unlock(&fs_info->transaction_kthread_mutex);

View file

@ -44,13 +44,14 @@ static inline u64 btrfs_sb_offset(int mirror)
struct btrfs_device;
struct btrfs_fs_devices;
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
u64 parent_transid);
void readahead_tree_block(struct btrfs_root *root, u64 bytenr);
int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 parent_transid);
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr);
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
int mirror_num, struct extent_buffer **eb);
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr);
struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
u64 bytenr);
void clean_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, struct extent_buffer *buf);
int open_ctree(struct super_block *sb,
@ -58,7 +59,7 @@ int open_ctree(struct super_block *sb,
char *options);
void close_ctree(struct btrfs_fs_info *fs_info);
int write_ctree_super(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int max_mirrors);
struct btrfs_fs_info *fs_info, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
struct buffer_head **bh_ret);
@ -83,8 +84,8 @@ btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty(struct btrfs_root *root);
void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_root(struct btrfs_root *root);
@ -134,9 +135,9 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
u64 objectid);

Diff for this file is not shown because of its large size.

View file

@ -2067,10 +2067,9 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
return 0;
}
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
int mirror_num)
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int mirror_num)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 start = eb->start;
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
int ret = 0;
@ -3753,8 +3752,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
* header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
*/
start = btrfs_item_nr_offset(nritems);
end = btrfs_leaf_data(eb) +
leaf_data_end(fs_info->tree_root, eb);
end = btrfs_leaf_data(eb) + leaf_data_end(fs_info, eb);
memzero_extent_buffer(eb, start, end - start);
}

View file

@ -457,8 +457,8 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
int clean_io_failure(struct inode *inode, u64 start, struct page *page,
unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
int mirror_num);
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int mirror_num);
/*
* When IO fails, either with EIO or csum verification fails, we

View file

@ -160,8 +160,7 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
kfree(bio->csum_allocated);
}
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
struct inode *inode, struct bio *bio,
static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
u64 logical_offset, u32 *dst, int dio)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@ -304,16 +303,14 @@ next:
return 0;
}
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u32 *dst)
int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 offset)
int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
@ -436,8 +433,8 @@ fail:
return ret;
}
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig)
int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
u64 file_start, int contig)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_sum *sums;
@ -543,12 +540,11 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
* This calls btrfs_truncate_item with the correct args based on the
* overlap, and fixes up the key as required.
*/
static noinline void truncate_one_csum(struct btrfs_root *root,
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_key *key,
u64 bytenr, u64 len)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
u64 csum_end;
@ -569,7 +565,7 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
btrfs_truncate_item(root, path, new_size, 1);
btrfs_truncate_item(fs_info, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
@ -581,7 +577,7 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
btrfs_truncate_item(root, path, new_size, 0);
btrfs_truncate_item(fs_info, path, new_size, 0);
key->offset = end_byte;
btrfs_set_item_key_safe(fs_info, path, key);
@ -698,7 +694,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
key.offset = end_byte - 1;
} else {
truncate_one_csum(root, path, &key, bytenr, len);
truncate_one_csum(fs_info, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
@ -824,11 +820,11 @@ again:
u32 diff;
u32 free_space;
if (btrfs_leaf_free_space(root, leaf) <
if (btrfs_leaf_free_space(fs_info, leaf) <
sizeof(struct btrfs_item) + csum_size * 2)
goto insert;
free_space = btrfs_leaf_free_space(root, leaf) -
free_space = btrfs_leaf_free_space(fs_info, leaf) -
sizeof(struct btrfs_item) - csum_size;
tmp = sums->len - total_bytes;
tmp >>= fs_info->sb->s_blocksize_bits;
@ -844,7 +840,7 @@ again:
diff /= csum_size;
diff *= csum_size;
btrfs_extend_item(root, path, diff);
btrfs_extend_item(fs_info, path, diff);
ret = 0;
goto csum;
}

View file

@ -129,10 +129,8 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
return 0;
}
static inline int __need_auto_defrag(struct btrfs_root *root)
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0;
@ -155,7 +153,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
u64 transid;
int ret;
if (!__need_auto_defrag(root))
if (!__need_auto_defrag(fs_info))
return 0;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
@ -200,10 +198,9 @@ static void btrfs_requeue_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
if (!__need_auto_defrag(root))
if (!__need_auto_defrag(fs_info))
goto out;
/*
@ -376,7 +373,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
&fs_info->fs_state))
break;
if (!__need_auto_defrag(fs_info->tree_root))
if (!__need_auto_defrag(fs_info))
break;
/* find an inode to defrag */
@ -488,10 +485,9 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
struct page **pages, size_t num_pages,
loff_t pos, size_t write_bytes,
struct extent_state **cached)
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int err = 0;
@ -860,7 +856,7 @@ next_slot:
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0) {
ret = btrfs_inc_extent_ref(trans, root,
ret = btrfs_inc_extent_ref(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
new_key.objectid,
@ -944,7 +940,7 @@ delete_extent_item:
extent_end = ALIGN(extent_end,
fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
ret = btrfs_free_extent(trans, root,
ret = btrfs_free_extent(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
key.objectid, key.offset -
@ -1001,7 +997,7 @@ delete_extent_item:
if (!ret && replace_extent && leafs_visited == 1 &&
(path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
path->locks[0] == BTRFS_WRITE_LOCK) &&
btrfs_leaf_free_space(root, leaf) >=
btrfs_leaf_free_space(fs_info, leaf) >=
sizeof(struct btrfs_item) + extent_item_size) {
key.objectid = ino;
@ -1238,8 +1234,8 @@ again:
extent_end - split);
btrfs_mark_buffer_dirty(leaf);
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
root->root_key.objectid,
ret = btrfs_inc_extent_ref(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
@ -1272,7 +1268,7 @@ again:
extent_end = other_end;
del_slot = path->slots[0] + 1;
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@ -1292,7 +1288,7 @@ again:
key.offset = other_start;
del_slot = path->slots[0];
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@ -1698,9 +1694,8 @@ again:
fs_info->sectorsize);
if (copied > 0)
ret = btrfs_dirty_pages(root, inode, pages,
dirty_pages, pos, copied,
NULL);
ret = btrfs_dirty_pages(inode, pages, dirty_pages,
pos, copied, NULL);
if (need_unlock)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state,
@ -1732,7 +1727,7 @@ again:
balance_dirty_pages_ratelimited(inode->i_mapping);
if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
pos += copied;
num_written += copied;
@ -2519,7 +2514,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out;
}
rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
ret = -ENOMEM;
goto out_free;
@ -2580,7 +2575,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, rsv_count);
if (IS_ERR(trans)) {
@ -2648,10 +2643,10 @@ out_trans:
ret = btrfs_update_inode(trans, root, inode);
updated_inode = true;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
out_free:
btrfs_free_path(path);
btrfs_free_block_rsv(root, rsv);
btrfs_free_block_rsv(fs_info, rsv);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS);
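
In file.c the root parameter of btrfs_dirty_pages() is dropped outright rather than replaced, because the inode the function already receives leads to the fs_info through the superblock, exactly as the btrfs_sb(inode->i_sb) calls above do. A small standalone sketch of that lookup chain follows; the struct layouts are simplified mock-ups and only btrfs_sb() mirrors a helper the real code uses.

#include <stdio.h>

/* mock types standing in for the kernel structures */
struct btrfs_fs_info { unsigned int sectorsize; };
struct super_block { void *s_fs_info; };
struct inode { struct super_block *i_sb; };

/* simplified equivalent of the kernel's btrfs_sb(): superblock -> fs_info */
static struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
	return sb->s_fs_info;
}

/* the inode alone is enough; no root or fs_info parameter is needed */
static void dirty_pages(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	printf("sectorsize=%u\n", fs_info->sectorsize);
}

int main(void)
{
	struct btrfs_fs_info fs_info = { .sectorsize = 4096 };
	struct super_block sb = { .s_fs_info = &fs_info };
	struct inode inode = { .i_sb = &sb };

	dirty_pages(&inode);
	return 0;
}

The design choice visible in these hunks is to pass the narrowest handle a function actually needs and derive it locally when a broader object such as an inode is already in hand.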

View file

@ -208,10 +208,9 @@ int create_free_space_inode(struct btrfs_root *root,
block_group->key.objectid);
}
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 needed_bytes;
int ret;
@ -1036,12 +1035,11 @@ fail:
}
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_io_ctl *io_ctl,
int *entries)
{
struct btrfs_fs_info *fs_info;
u64 start, extent_start, extent_end, len;
struct extent_io_tree *unpin = NULL;
int ret;
@ -1049,8 +1047,6 @@ write_pinned_extent_entries(struct btrfs_root *root,
if (!block_group)
return 0;
fs_info = block_group->fs_info;
/*
* We want to add any pinned extents to our free space cache
* so we don't leak the space
@ -1243,6 +1239,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 offset)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_state *cached_state = NULL;
LIST_HEAD(bitmap_list);
int entries = 0;
@ -1300,7 +1297,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
* If this changes while we are working we'll get added back to
* the dirty list and redo it. No locking needed
*/
ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
ret = write_pinned_extent_entries(fs_info, block_group,
io_ctl, &entries);
if (ret)
goto out_nospc_locked;
@ -1319,8 +1317,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_zero_remaining_pages(io_ctl);
/* Everything is written out, now we dirty the pages in the file. */
ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
0, i_size_read(inode), &cached_state);
ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
i_size_read(inode), &cached_state);
if (ret)
goto out_nospc;
@ -1994,7 +1992,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
bool forced = false;
#ifdef CONFIG_BTRFS_DEBUG
if (btrfs_should_fragment_free_space(fs_info->extent_root, block_group))
if (btrfs_should_fragment_free_space(block_group))
forced = true;
#endif
@ -3034,13 +3032,12 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
* returns zero and sets up cluster if things worked out, otherwise
* it returns -enospc
*/
int btrfs_find_space_cluster(struct btrfs_root *root,
int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space *entry, *tmp;
LIST_HEAD(bitmaps);
u64 min_bytes;
@ -3148,8 +3145,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
ret = btrfs_discard_extent(fs_info->extent_root,
start, bytes, &trimmed);
ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
if (!ret)
*total_trimmed += trimmed;

View file

@ -59,7 +59,7 @@ int create_free_space_inode(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
@ -109,7 +109,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes);
int btrfs_find_space_cluster(struct btrfs_root *root,
int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size);

View file

@ -182,7 +182,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
btrfs_truncate_item(root, path, item_size - del_len, 1);
btrfs_truncate_item(root->fs_info, path, item_size - del_len, 1);
out:
btrfs_free_path(path);
@ -245,7 +245,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
btrfs_truncate_item(root, path, item_size - sub_item_len, 1);
btrfs_truncate_item(root->fs_info, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
@ -297,7 +297,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
name, name_len, NULL))
goto out;
btrfs_extend_item(root, path, ins_len);
btrfs_extend_item(root->fs_info, path, ins_len);
ret = 0;
}
if (ret < 0)
@ -355,7 +355,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
goto out;
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
btrfs_extend_item(root, path, ins_len);
btrfs_extend_item(fs_info, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);

Просмотреть файл

@ -509,7 +509,8 @@ out_put:
out_release:
trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
trans->bytes_reserved, 0);
btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
btrfs_block_rsv_release(fs_info, trans->block_rsv,
trans->bytes_reserved);
out:
trans->block_rsv = rsv;
trans->bytes_reserved = num_bytes;

Просмотреть файл

@ -874,7 +874,7 @@ retry:
return;
out_free_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
extent_clear_unlock_delalloc(inode, async_extent->start,
async_extent->start +
@ -1088,7 +1088,7 @@ out_drop_extent_cache:
btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0);
out_reserve:
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
locked_page,
@ -1216,10 +1216,9 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
return 0;
}
static noinline int csum_exist_in_range(struct btrfs_root *root,
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_ordered_sum *sums;
LIST_HEAD(list);
@ -1381,7 +1380,7 @@ next_slot:
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
goto out_check;
if (btrfs_extent_readonly(root, disk_bytenr))
if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out_check;
if (btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
@ -1404,7 +1403,8 @@ next_slot:
* this ensure that csum for a given extent are
* either valid or do not exist.
*/
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
if (csum_exist_in_range(fs_info, disk_bytenr,
num_bytes))
goto out_check;
if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr))
goto out_check;
@ -1899,10 +1899,9 @@ static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
ret = btrfs_csum_one_bio(inode, bio, 0, 0);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
@ -1919,10 +1918,10 @@ static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
ret = btrfs_map_bio(root, bio, mirror_num, 1);
ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@ -1961,7 +1960,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
bio_flags);
goto out;
} else if (!skip_sum) {
ret = btrfs_lookup_bio_sums(root, inode, bio, NULL);
ret = btrfs_lookup_bio_sums(inode, bio, NULL);
if (ret)
goto out;
}
@ -1977,13 +1976,13 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
__btrfs_submit_bio_done);
goto out;
} else if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
ret = btrfs_csum_one_bio(inode, bio, 0, 0);
if (ret)
goto out;
}
mapit:
ret = btrfs_map_bio(root, bio, mirror_num, 0);
ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
out:
if (ret < 0) {
@ -2194,10 +2193,9 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
ins.objectid = disk_bytenr;
ins.offset = disk_num_bytes;
ins.type = BTRFS_EXTENT_ITEM_KEY;
ret = btrfs_alloc_reserved_file_extent(trans, root,
root->root_key.objectid,
btrfs_ino(inode), file_pos,
ram_bytes, &ins);
ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
btrfs_ino(inode), file_pos,
ram_bytes, &ins);
/*
* Release the reserved range from inode dirty range map, as it is
* already moved into delayed_ref_head
@ -2654,7 +2652,7 @@ again:
inode_add_bytes(inode, len);
btrfs_release_path(path);
ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
new->disk_len, 0,
backref->root_id, backref->inum,
new->file_pos); /* start - extent_offset */
@ -2855,10 +2853,9 @@ out_kfree:
return NULL;
}
static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
u64 start, u64 len)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_group_cache *cache;
cache = btrfs_lookup_block_group(fs_info, start);
@ -2984,7 +2981,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
compress_type, 0, 0,
BTRFS_FILE_EXTENT_REG);
if (!ret)
btrfs_release_delalloc_bytes(root,
btrfs_release_delalloc_bytes(fs_info,
ordered_extent->start,
ordered_extent->disk_len);
}
@ -3038,7 +3035,8 @@ out:
if ((ret || !logical_len) &&
!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
!test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
btrfs_free_reserved_extent(root, ordered_extent->start,
btrfs_free_reserved_extent(fs_info,
ordered_extent->start,
ordered_extent->disk_len, 1);
}
@ -3187,9 +3185,8 @@ void btrfs_add_delayed_iput(struct inode *inode)
spin_unlock(&fs_info->delayed_iput_lock);
}
void btrfs_run_delayed_iputs(struct btrfs_root *root)
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->delayed_iput_lock);
while (!list_empty(&fs_info->delayed_iputs)) {
@ -3255,7 +3252,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
if (block_rsv) {
WARN_ON(block_rsv->size > 0);
btrfs_free_block_rsv(root, block_rsv);
btrfs_free_block_rsv(fs_info, block_rsv);
}
}
@ -3276,7 +3273,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
int ret;
if (!root->orphan_block_rsv) {
block_rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
block_rsv = btrfs_alloc_block_rsv(fs_info,
BTRFS_BLOCK_RSV_TEMP);
if (!block_rsv)
return -ENOMEM;
}
@ -3285,7 +3283,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
if (!root->orphan_block_rsv) {
root->orphan_block_rsv = block_rsv;
} else if (block_rsv) {
btrfs_free_block_rsv(root, block_rsv);
btrfs_free_block_rsv(fs_info, block_rsv);
block_rsv = NULL;
}
@ -3575,7 +3573,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;
if (root->orphan_block_rsv)
btrfs_block_rsv_release(root, root->orphan_block_rsv,
btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
(u64)-1);
if (root->orphan_block_rsv ||
@ -4063,7 +4061,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
goto err;
}
skip_backref:
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto err;
@ -4159,7 +4157,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
out:
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(root->fs_info);
return ret;
}
@ -4227,7 +4225,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto out;
@ -4296,7 +4294,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
}
out:
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(root->fs_info);
return err;
}
@ -4312,7 +4310,7 @@ static int truncate_space_check(struct btrfs_trans_handle *trans,
* This is only used to apply pressure to the enospc system, we don't
* intend to use this reservation at all.
*/
bytes_deleted = btrfs_csum_bytes_to_leaves(root, bytes_deleted);
bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
bytes_deleted *= fs_info->nodesize;
ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
@ -4360,7 +4358,7 @@ static int truncate_inline_extent(struct inode *inode,
btrfs_set_file_extent_ram_bytes(leaf, fi, size);
size = btrfs_file_extent_calc_inline_size(size);
btrfs_truncate_item(root, path, size, 1);
btrfs_truncate_item(root->fs_info, path, size, 1);
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
inode_sub_bytes(inode, item_end + 1 - new_size);
@ -4622,13 +4620,13 @@ delete:
root == fs_info->tree_root)) {
btrfs_set_path_blocking(path);
bytes_deleted += extent_num_bytes;
ret = btrfs_free_extent(trans, root, extent_start,
ret = btrfs_free_extent(trans, fs_info, extent_start,
extent_num_bytes, 0,
btrfs_header_owner(leaf),
ino, extent_offset);
BUG_ON(ret);
if (btrfs_should_throttle_delayed_refs(trans, root))
btrfs_async_run_delayed_refs(root,
if (btrfs_should_throttle_delayed_refs(trans, fs_info))
btrfs_async_run_delayed_refs(fs_info,
trans->delayed_ref_updates * 2,
trans->transid, 0);
if (be_nice) {
@ -4637,9 +4635,8 @@ delete:
should_end = 1;
}
if (btrfs_should_throttle_delayed_refs(trans,
root)) {
fs_info))
should_throttle = 1;
}
}
}
@ -4664,7 +4661,9 @@ delete:
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
ret = btrfs_run_delayed_refs(trans, root, updates * 2);
ret = btrfs_run_delayed_refs(trans,
fs_info,
updates * 2);
if (ret && !err)
err = ret;
}
@ -4699,7 +4698,8 @@ error:
unsigned long updates = trans->delayed_ref_updates;
if (updates) {
trans->delayed_ref_updates = 0;
ret = btrfs_run_delayed_refs(trans, root, updates * 2);
ret = btrfs_run_delayed_refs(trans, fs_info,
updates * 2);
if (ret && !err)
err = ret;
}
@ -5280,7 +5280,7 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
goto no_delete;
@ -5325,14 +5325,14 @@ void btrfs_evict_inode(struct inode *inode)
"Could not get space for a delete, will truncate on mount %d",
ret);
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
@ -5342,7 +5342,7 @@ void btrfs_evict_inode(struct inode *inode)
* again.
*/
if (steal_from_global) {
if (!btrfs_check_space_for_delayed_refs(trans, root))
if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
ret = btrfs_block_rsv_migrate(global_rsv, rsv,
min_size, 0);
else
@ -5358,7 +5358,7 @@ void btrfs_evict_inode(struct inode *inode)
ret = btrfs_commit_transaction(trans, root);
if (ret) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
btrfs_free_block_rsv(fs_info, rsv);
goto no_delete;
}
continue;
@ -5375,10 +5375,10 @@ void btrfs_evict_inode(struct inode *inode)
trans->block_rsv = &fs_info->trans_block_rsv;
btrfs_end_transaction(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
}
btrfs_free_block_rsv(root, rsv);
btrfs_free_block_rsv(fs_info, rsv);
/*
* Errors here aren't a big deal, it just means we leave orphan items
@ -5397,7 +5397,7 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_return_ino(root, btrfs_ino(inode));
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
no_delete:
btrfs_remove_delayed_node(inode);
clear_inode(inode);
@ -5443,13 +5443,12 @@ out_err:
* needs to be changed to reflect the root directory of the tree root. This
* is kind of like crossing a mount point.
*/
static int fixup_tree_root_location(struct btrfs_root *root,
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
struct inode *dir,
struct dentry *dentry,
struct btrfs_key *location,
struct btrfs_root **sub_root)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_path *path;
struct btrfs_root *new_root;
struct btrfs_root_ref *ref;
@ -5749,7 +5748,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
index = srcu_read_lock(&fs_info->subvol_srcu);
ret = fixup_tree_root_location(root, dir, dentry,
ret = fixup_tree_root_location(fs_info, dir, dentry,
&location, &sub_root);
if (ret < 0) {
if (ret != -ENOENT)
@ -5822,6 +5821,7 @@ unsigned char btrfs_filetype_table[] = {
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_item *item;
struct btrfs_dir_item *di;
@ -5889,7 +5889,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
ctx->pos = found_key.offset;
di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
if (verify_dir_item(root, leaf, di))
if (verify_dir_item(fs_info, leaf, di))
goto next;
name_len = btrfs_dir_name_len(leaf, di);
@ -5988,6 +5988,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
*/
static int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
@ -6011,7 +6012,7 @@ static int btrfs_dirty_inode(struct inode *inode)
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);
btrfs_balance_delayed_items(fs_info);
return ret;
}
@ -6394,6 +6395,7 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
@ -6447,8 +6449,8 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
out_unlock:
btrfs_end_transaction(trans, root);
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
if (drop_inode) {
inode_dec_link_count(inode);
iput(inode);
@ -6465,6 +6467,7 @@ out_unlock_inode:
static int btrfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
@ -6526,8 +6529,8 @@ out_unlock:
inode_dec_link_count(inode);
iput(inode);
}
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return err;
out_unlock_inode:
@ -6542,6 +6545,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
struct btrfs_trans_handle *trans = NULL;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = d_inode(old_dentry);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 index;
int err;
int drop_inode = 0;
@ -6599,7 +6603,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
btrfs_log_new_name(trans, inode, NULL, parent);
}
btrfs_balance_delayed_items(root);
btrfs_balance_delayed_items(fs_info);
fail:
if (trans)
btrfs_end_transaction(trans, root);
@ -6607,12 +6611,13 @@ fail:
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
return err;
}
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct inode *inode = NULL;
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
@ -6675,8 +6680,8 @@ out_fail:
inode_dec_link_count(inode);
iput(inode);
}
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return err;
out_fail_inode:
@ -7255,7 +7260,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
ins.offset, 0);
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
if (IS_ERR(em))
btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
btrfs_free_reserved_extent(fs_info, ins.objectid,
ins.offset, 1);
return em;
}
@ -7268,6 +7274,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
u64 *orig_start, u64 *orig_block_len,
u64 *ram_bytes)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_path *path;
int ret;
@ -7348,7 +7355,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
}
if (btrfs_extent_readonly(root, disk_bytenr))
if (btrfs_extent_readonly(fs_info, disk_bytenr))
goto out;
num_bytes = min(offset + *len, extent_end) - offset;
@ -7393,8 +7400,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
*/
disk_bytenr += backref_offset;
disk_bytenr += offset - key.offset;
if (csum_exist_in_range(root, disk_bytenr, num_bytes))
goto out;
if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
goto out;
/*
* all of the above have passed, it is safe to overwrite this extent
* without cow
@ -7832,19 +7839,18 @@ err:
static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
int mirror_num)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
BUG_ON(bio_op(bio) == REQ_OP_WRITE);
bio_get(bio);
ret = btrfs_bio_wq_end_io(root->fs_info, bio,
BTRFS_WQ_ENDIO_DIO_REPAIR);
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
if (ret)
goto err;
ret = btrfs_map_bio(root, bio, mirror_num, 0);
ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
err:
bio_put(bio);
return ret;
@ -8225,8 +8231,7 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
unsigned long bio_flags, u64 offset)
{
int ret;
struct btrfs_root *root = BTRFS_I(inode)->root;
ret = btrfs_csum_one_bio(root, inode, bio, offset, 1);
ret = btrfs_csum_one_bio(inode, bio, offset, 1);
BUG_ON(ret); /* -ENOMEM */
return 0;
}
@ -8280,8 +8285,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
return bio;
}
static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
struct inode *inode,
static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
struct btrfs_dio_private *dip,
struct bio *bio,
u64 file_offset)
@ -8296,7 +8300,7 @@ static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
* contention.
*/
if (dip->logical_offset == file_offset) {
ret = btrfs_lookup_bio_sums_dio(root, inode, dip->orig_bio,
ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
file_offset);
if (ret)
return ret;
@ -8319,7 +8323,6 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_dio_private *dip = bio->bi_private;
bool write = bio_op(bio) == REQ_OP_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
if (async_submit)
@ -8347,17 +8350,17 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
* If we aren't doing async submit, calculate the csum of the
* bio now.
*/
ret = btrfs_csum_one_bio(root, inode, bio, file_offset, 1);
ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
if (ret)
goto err;
} else {
ret = btrfs_lookup_and_bind_dio_csum(root, inode, dip, bio,
ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
file_offset);
if (ret)
goto err;
}
map:
ret = btrfs_map_bio(root, bio, 0, async_submit);
ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
@ -8595,10 +8598,10 @@ free_ordered:
kfree(dip);
}
static ssize_t check_direct_IO(struct btrfs_root *root, struct kiocb *iocb,
const struct iov_iter *iter, loff_t offset)
static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
struct kiocb *iocb,
const struct iov_iter *iter, loff_t offset)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int seg;
int i;
unsigned int blocksize_mask = fs_info->sectorsize - 1;
@ -8642,7 +8645,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
bool relock = false;
ssize_t ret;
if (check_direct_IO(BTRFS_I(inode)->root, iocb, iter, offset))
if (check_direct_IO(fs_info, iocb, iter, offset))
return 0;
inode_dio_begin(inode);
@ -9150,7 +9153,7 @@ static int btrfs_truncate(struct inode *inode)
* 3) fs_info->trans_block_rsv - this will have 1 items worth left for
* updating the inode.
*/
rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv)
return -ENOMEM;
rsv->size = min_size;
@ -9198,7 +9201,7 @@ static int btrfs_truncate(struct inode *inode)
}
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
@ -9227,10 +9230,10 @@ static int btrfs_truncate(struct inode *inode)
err = ret;
ret = btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
}
out:
btrfs_free_block_rsv(root, rsv);
btrfs_free_block_rsv(fs_info, rsv);
if (ret && !err)
err = ret;
@ -10302,7 +10305,7 @@ out_unlock:
inode_dec_link_count(inode);
iput(inode);
}
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
return err;
out_unlock_inode:
@ -10365,7 +10368,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
ins.offset, 0, 0, 0,
BTRFS_FILE_EXTENT_PREALLOC);
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid,
btrfs_free_reserved_extent(fs_info, ins.objectid,
ins.offset, 0);
btrfs_abort_transaction(trans, ret);
if (own_trans)
@ -10482,6 +10485,7 @@ static int btrfs_permission(struct inode *inode, int mask)
static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct inode *inode = NULL;
@ -10541,8 +10545,8 @@ out:
btrfs_end_transaction(trans, root);
if (ret)
iput(inode);
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
btrfs_balance_delayed_items(fs_info);
btrfs_btree_balance_dirty(fs_info);
return ret;
out_inode:
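
The inode.c call sites above obtain the fs_info they now pass down in one of two ways, depending on what is already in scope. A minimal sketch (not itself part of the patch; the helper names are illustrative only) of the two lookups the converted callers rely on:

static inline struct btrfs_fs_info *fs_info_from_inode(struct inode *inode)
{
	/* via the VFS superblock, used on the inode-centric paths */
	return btrfs_sb(inode->i_sb);
}

static inline struct btrfs_fs_info *fs_info_from_root(struct btrfs_root *root)
{
	/* via an already-resolved root, e.g. BTRFS_I(inode)->root */
	return root->fs_info;
}

Both resolve to the same per-filesystem structure, which is why helpers that only touch filesystem-wide state (block reserves, delayed refs, btree balancing) can take an fs_info directly instead of a root.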


@ -411,7 +411,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
range.len = min(range.len, total_bytes - range.start);
range.minlen = max(range.minlen, minlen);
ret = btrfs_trim_fs(fs_info->tree_root, &range);
ret = btrfs_trim_fs(fs_info, &range);
if (ret < 0)
return ret;
@ -487,7 +487,7 @@ static noinline int create_subvol(struct inode *dir,
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
btrfs_subvolume_release_metadata(root, &block_rsv,
btrfs_subvolume_release_metadata(fs_info, &block_rsv,
qgroup_reserved);
goto fail_free;
}
@ -613,7 +613,7 @@ fail:
kfree(root_item);
trans->block_rsv = NULL;
trans->bytes_reserved = 0;
btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
if (async_transid) {
*async_transid = trans->transid;
@ -755,7 +755,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
d_instantiate(dentry, inode);
ret = 0;
fail:
btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
btrfs_subvolume_release_metadata(fs_info,
&pending_snapshot->block_rsv,
pending_snapshot->qgroup_reserved);
dec_and_free:
@ -2557,7 +2557,7 @@ out_end_trans:
err = ret;
inode->i_flags |= S_DEAD;
out_release:
btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
btrfs_subvolume_release_metadata(fs_info, &block_rsv, qgroup_reserved);
out_up_write:
up_write(&fs_info->subvol_sem);
if (err) {
@ -2661,9 +2661,8 @@ out:
return ret;
}
static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
static long btrfs_ioctl_add_dev(struct btrfs_fs_info *fs_info, void __user *arg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_vol_args *vol_args;
int ret;
@ -2681,7 +2680,7 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
}
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
ret = btrfs_init_new_device(root->fs_info, vol_args->name);
ret = btrfs_init_new_device(fs_info, vol_args->name);
if (!ret)
btrfs_info(fs_info, "disk added %s", vol_args->name);
@ -2697,7 +2696,6 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_vol_args_v2 *vol_args;
int ret;
@ -2725,10 +2723,10 @@ static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg)
mutex_lock(&fs_info->volume_mutex);
if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) {
ret = btrfs_rm_device(root, NULL, vol_args->devid);
ret = btrfs_rm_device(fs_info, NULL, vol_args->devid);
} else {
vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0';
ret = btrfs_rm_device(root, vol_args->name, 0);
ret = btrfs_rm_device(fs_info, vol_args->name, 0);
}
mutex_unlock(&fs_info->volume_mutex);
atomic_set(&fs_info->mutually_exclusive_operation_running, 0);
@ -2752,7 +2750,6 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ioctl_vol_args *vol_args;
int ret;
@ -2776,7 +2773,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
mutex_lock(&fs_info->volume_mutex);
ret = btrfs_rm_device(root, vol_args->name, 0);
ret = btrfs_rm_device(fs_info, vol_args->name, 0);
mutex_unlock(&fs_info->volume_mutex);
if (!ret)
@ -2790,9 +2787,9 @@ out_drop_write:
return ret;
}
static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
static long btrfs_ioctl_fs_info(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_fs_info_args *fi_args;
struct btrfs_device *device;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@ -2823,9 +2820,9 @@ static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
return ret;
}
static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
static long btrfs_ioctl_dev_info(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_dev_info_args *di_args;
struct btrfs_device *dev;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@ -3750,7 +3747,8 @@ process_slot:
if (disko) {
inode_add_bytes(inode, datal);
ret = btrfs_inc_extent_ref(trans, root,
ret = btrfs_inc_extent_ref(trans,
fs_info,
disko, diskl, 0,
root->root_key.objectid,
btrfs_ino(inode),
@ -4151,9 +4149,9 @@ void btrfs_get_block_group_info(struct list_head *groups_list,
}
}
static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_space_args space_args;
struct btrfs_ioctl_space_info space;
struct btrfs_ioctl_space_info *dest;
@ -4346,7 +4344,7 @@ out:
return 0;
}
static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
static noinline long btrfs_ioctl_wait_sync(struct btrfs_fs_info *fs_info,
void __user *argp)
{
u64 transid;
@ -4357,7 +4355,7 @@ static noinline long btrfs_ioctl_wait_sync(struct btrfs_root *root,
} else {
transid = 0; /* current trans */
}
return btrfs_wait_for_commit(root, transid);
return btrfs_wait_for_commit(fs_info, transid);
}
static long btrfs_ioctl_scrub(struct file *file, void __user *arg)
@ -4393,15 +4391,15 @@ out:
return ret;
}
static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
static long btrfs_ioctl_scrub_cancel(struct btrfs_fs_info *fs_info)
{
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
return btrfs_scrub_cancel(root->fs_info);
return btrfs_scrub_cancel(fs_info);
}
static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
static long btrfs_ioctl_scrub_progress(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_ioctl_scrub_args *sa;
@ -4414,7 +4412,7 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
if (IS_ERR(sa))
return PTR_ERR(sa);
ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
ret = btrfs_scrub_progress(fs_info, sa->devid, &sa->progress);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
@ -4423,7 +4421,7 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
return ret;
}
static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
static long btrfs_ioctl_get_dev_stats(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_ioctl_get_dev_stats *sa;
@ -4438,7 +4436,7 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
return -EPERM;
}
ret = btrfs_get_dev_stats(root, sa);
ret = btrfs_get_dev_stats(fs_info, sa);
if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;
@ -4447,9 +4445,9 @@ static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
return ret;
}
static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_dev_replace_args *p;
int ret;
@ -4470,7 +4468,7 @@ static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
&fs_info->mutually_exclusive_operation_running, 1)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
} else {
ret = btrfs_dev_replace_by_ioctl(root, p);
ret = btrfs_dev_replace_by_ioctl(fs_info, p);
atomic_set(
&fs_info->mutually_exclusive_operation_running, 0);
}
@ -4573,7 +4571,7 @@ static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
return 0;
}
static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
static long btrfs_ioctl_logical_to_ino(struct btrfs_fs_info *fs_info,
void __user *arg)
{
int ret = 0;
@ -4603,7 +4601,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
goto out;
}
ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
ret = iterate_inodes_from_logical(loi->logical, fs_info, path,
build_ino_list, inodes);
if (ret == -EINVAL)
ret = -ENOENT;
@ -4799,10 +4797,8 @@ out:
return ret;
}
static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
static long btrfs_ioctl_balance_ctl(struct btrfs_fs_info *fs_info, int cmd)
{
struct btrfs_fs_info *fs_info = root->fs_info;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@ -4816,10 +4812,9 @@ static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
return -EINVAL;
}
static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
static long btrfs_ioctl_balance_progress(struct btrfs_fs_info *fs_info,
void __user *arg)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ioctl_balance_args *bargs;
int ret = 0;
@ -5395,12 +5390,11 @@ static int btrfs_ioctl_get_features(struct file *file, void __user *arg)
return 0;
}
static int check_feature_bits(struct btrfs_root *root,
static int check_feature_bits(struct btrfs_fs_info *fs_info,
enum btrfs_feature_set set,
u64 change_mask, u64 flags, u64 supported_flags,
u64 safe_set, u64 safe_clear)
{
struct btrfs_fs_info *fs_info = root->fs_info;
const char *type = btrfs_feature_set_names[set];
char *names;
u64 disallowed, unsupported;
@ -5455,8 +5449,8 @@ static int check_feature_bits(struct btrfs_root *root,
return 0;
}
#define check_feature(root, change_mask, flags, mask_base) \
check_feature_bits(root, FEAT_##mask_base, change_mask, flags, \
#define check_feature(fs_info, change_mask, flags, mask_base) \
check_feature_bits(fs_info, FEAT_##mask_base, change_mask, flags, \
BTRFS_FEATURE_ ## mask_base ## _SUPP, \
BTRFS_FEATURE_ ## mask_base ## _SAFE_SET, \
BTRFS_FEATURE_ ## mask_base ## _SAFE_CLEAR)
@ -5483,17 +5477,17 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
!flags[0].incompat_flags)
return 0;
ret = check_feature(root, flags[0].compat_flags,
ret = check_feature(fs_info, flags[0].compat_flags,
flags[1].compat_flags, COMPAT);
if (ret)
return ret;
ret = check_feature(root, flags[0].compat_ro_flags,
ret = check_feature(fs_info, flags[0].compat_ro_flags,
flags[1].compat_ro_flags, COMPAT_RO);
if (ret)
return ret;
ret = check_feature(root, flags[0].incompat_flags,
ret = check_feature(fs_info, flags[0].incompat_flags,
flags[1].incompat_flags, INCOMPAT);
if (ret)
return ret;
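
The check_feature() rework above only changes which handle the wrapper forwards; the token pasting is untouched. As a worked example of what the preprocessor produces, the INCOMPAT call just above expands roughly to (whitespace normalized):

	ret = check_feature_bits(fs_info, FEAT_INCOMPAT,
				 flags[0].incompat_flags,
				 flags[1].incompat_flags,
				 BTRFS_FEATURE_INCOMPAT_SUPP,
				 BTRFS_FEATURE_INCOMPAT_SAFE_SET,
				 BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR);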
@ -5572,15 +5566,15 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_RESIZE:
return btrfs_ioctl_resize(file, argp);
case BTRFS_IOC_ADD_DEV:
return btrfs_ioctl_add_dev(root, argp);
return btrfs_ioctl_add_dev(fs_info, argp);
case BTRFS_IOC_RM_DEV:
return btrfs_ioctl_rm_dev(file, argp);
case BTRFS_IOC_RM_DEV_V2:
return btrfs_ioctl_rm_dev_v2(file, argp);
case BTRFS_IOC_FS_INFO:
return btrfs_ioctl_fs_info(root, argp);
return btrfs_ioctl_fs_info(fs_info, argp);
case BTRFS_IOC_DEV_INFO:
return btrfs_ioctl_dev_info(root, argp);
return btrfs_ioctl_dev_info(fs_info, argp);
case BTRFS_IOC_BALANCE:
return btrfs_ioctl_balance(file, NULL);
case BTRFS_IOC_TRANS_START:
@ -5596,9 +5590,9 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_INO_PATHS:
return btrfs_ioctl_ino_to_path(root, argp);
case BTRFS_IOC_LOGICAL_INO:
return btrfs_ioctl_logical_to_ino(root, argp);
return btrfs_ioctl_logical_to_ino(fs_info, argp);
case BTRFS_IOC_SPACE_INFO:
return btrfs_ioctl_space_info(root, argp);
return btrfs_ioctl_space_info(fs_info, argp);
case BTRFS_IOC_SYNC: {
int ret;
@ -5617,19 +5611,19 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_START_SYNC:
return btrfs_ioctl_start_sync(root, argp);
case BTRFS_IOC_WAIT_SYNC:
return btrfs_ioctl_wait_sync(root, argp);
return btrfs_ioctl_wait_sync(fs_info, argp);
case BTRFS_IOC_SCRUB:
return btrfs_ioctl_scrub(file, argp);
case BTRFS_IOC_SCRUB_CANCEL:
return btrfs_ioctl_scrub_cancel(root, argp);
return btrfs_ioctl_scrub_cancel(fs_info);
case BTRFS_IOC_SCRUB_PROGRESS:
return btrfs_ioctl_scrub_progress(root, argp);
return btrfs_ioctl_scrub_progress(fs_info, argp);
case BTRFS_IOC_BALANCE_V2:
return btrfs_ioctl_balance(file, argp);
case BTRFS_IOC_BALANCE_CTL:
return btrfs_ioctl_balance_ctl(root, arg);
return btrfs_ioctl_balance_ctl(fs_info, arg);
case BTRFS_IOC_BALANCE_PROGRESS:
return btrfs_ioctl_balance_progress(root, argp);
return btrfs_ioctl_balance_progress(fs_info, argp);
case BTRFS_IOC_SET_RECEIVED_SUBVOL:
return btrfs_ioctl_set_received_subvol(file, argp);
#ifdef CONFIG_64BIT
@ -5639,7 +5633,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_SEND:
return btrfs_ioctl_send(file, argp);
case BTRFS_IOC_GET_DEV_STATS:
return btrfs_ioctl_get_dev_stats(root, argp);
return btrfs_ioctl_get_dev_stats(fs_info, argp);
case BTRFS_IOC_QUOTA_CTL:
return btrfs_ioctl_quota_ctl(file, argp);
case BTRFS_IOC_QGROUP_ASSIGN:
@ -5655,7 +5649,7 @@ long btrfs_ioctl(struct file *file, unsigned int
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
return btrfs_ioctl_quota_rescan_wait(file, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(root, argp);
return btrfs_ioctl_dev_replace(fs_info, argp);
case BTRFS_IOC_GET_FSLABEL:
return btrfs_ioctl_get_fslabel(file, argp);
case BTRFS_IOC_SET_FSLABEL:


@ -161,9 +161,8 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
}
}
void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int i;
u32 type, nr;
struct btrfs_item *item;
@ -184,7 +183,8 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
nr = btrfs_header_nritems(l);
btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d",
btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l));
btrfs_header_bytenr(l), nr,
btrfs_leaf_free_space(fs_info, l));
for (i = 0 ; i < nr ; i++) {
item = btrfs_item_nr(i);
btrfs_item_key_to_cpu(l, &key, i);
@ -315,9 +315,8 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
}
}
void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int i; u32 nr;
struct btrfs_key key;
int level;
@ -327,7 +326,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
nr = btrfs_header_nritems(c);
level = btrfs_header_level(c);
if (level == 0) {
btrfs_print_leaf(root, c);
btrfs_print_leaf(fs_info, c);
return;
}
btrfs_info(fs_info,
@ -341,7 +340,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
btrfs_node_blockptr(c, i));
}
for (i = 0; i < nr; i++) {
struct extent_buffer *next = read_tree_block(root,
struct extent_buffer *next = read_tree_block(fs_info,
btrfs_node_blockptr(c, i),
btrfs_node_ptr_generation(c, i));
if (IS_ERR(next)) {
@ -357,7 +356,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
if (btrfs_header_level(next) !=
level - 1)
BUG();
btrfs_print_tree(root, next);
btrfs_print_tree(fs_info, next);
free_extent_buffer(next);
}
}


@ -18,6 +18,6 @@
#ifndef __PRINT_TREE_
#define __PRINT_TREE_
void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c);
void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l);
void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c);
#endif


@ -301,6 +301,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
struct inode *parent)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int i;
@ -320,14 +321,14 @@ static int inherit_props(struct btrfs_trans_handle *trans,
if (!value)
continue;
num_bytes = btrfs_calc_trans_metadata_size(root->fs_info, 1);
num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
num_bytes, BTRFS_RESERVE_NO_FLUSH);
if (ret)
goto out;
ret = __btrfs_set_prop(trans, inode, h->xattr_name,
value, strlen(value), 0);
btrfs_block_rsv_release(root, trans->block_rsv, num_bytes);
btrfs_block_rsv_release(fs_info, trans->block_rsv, num_bytes);
if (ret)
goto out;
}


@ -1509,8 +1509,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
record->old_roots = NULL;
spin_lock(&delayed_refs->lock);
ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs,
record);
ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
spin_unlock(&delayed_refs->lock);
if (ret > 0)
kfree(record);
@ -1518,10 +1517,9 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
}
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct extent_buffer *eb)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int nr = btrfs_header_nritems(eb);
int i, extent_type, ret;
struct btrfs_key key;
@ -1645,7 +1643,7 @@ int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
}
if (root_level == 0) {
ret = btrfs_qgroup_trace_leaf_items(trans, root, root_eb);
ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
goto out;
}
@ -1683,7 +1681,7 @@ walk_down:
child_bytenr = btrfs_node_blockptr(eb, parent_slot);
child_gen = btrfs_node_ptr_generation(eb, parent_slot);
eb = read_tree_block(root, child_bytenr, child_gen);
eb = read_tree_block(fs_info, child_bytenr, child_gen);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
@ -1709,8 +1707,8 @@ walk_down:
}
if (level == 0) {
ret = btrfs_qgroup_trace_leaf_items(trans, root,
path->nodes[level]);
ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
path->nodes[level]);
if (ret)
goto out;


@ -129,7 +129,7 @@ int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
* Return <0 for error(ENOMEM)
*/
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
/*
* Inform qgroup to trace a whole subtree, including all its child tree


@ -969,8 +969,9 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
* allocation and initial setup for the btrfs_raid_bio. Note that
* this does not allocate any pages for rbio->pages.
*/
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
struct btrfs_bio *bbio, u64 stripe_len)
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
struct btrfs_bio *bbio,
u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
int nr_data = 0;
@ -991,7 +992,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list);
rbio->bbio = bbio;
rbio->fs_info = root->fs_info;
rbio->fs_info = fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
rbio->real_stripes = real_stripes;
@ -1734,16 +1735,15 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
/*
* our main entry point for writes from the rest of the FS.
*/
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio;
struct btrfs_plug_cb *plug = NULL;
struct blk_plug_cb *cb;
int ret;
rbio = alloc_rbio(root, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
btrfs_put_bbio(bbio);
return PTR_ERR(rbio);
@ -2113,15 +2113,14 @@ cleanup:
* so we assume the bio they send down corresponds to a failed part
* of the drive.
*/
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio;
int ret;
rbio = alloc_rbio(root, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
if (generic_io)
btrfs_put_bbio(bbio);
@ -2203,16 +2202,15 @@ static void read_rebuild_work(struct btrfs_work *work)
*/
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_raid_bio *rbio;
int i;
rbio = alloc_rbio(root, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio))
return NULL;
bio_list_add(&rbio->bio_list, bio);
@ -2653,12 +2651,12 @@ void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
/* The following code is used for dev replace of a missing RAID 5/6 device. */
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length)
{
struct btrfs_raid_bio *rbio;
rbio = alloc_rbio(root, bbio, length);
rbio = alloc_rbio(fs_info, bbio, length);
if (IS_ERR(rbio))
return NULL;


@ -42,24 +42,24 @@ static inline int nr_data_stripes(struct map_lookup *map)
struct btrfs_raid_bio;
struct btrfs_device;
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io);
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len);
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
u64 logical);
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length);
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);


@ -303,14 +303,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
return zone;
}
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
u64 logical,
struct btrfs_key *top)
{
int ret;
struct reada_extent *re = NULL;
struct reada_extent *re_exist = NULL;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_bio *bbio = NULL;
struct btrfs_device *dev;
struct btrfs_device *prev_dev;
@ -549,7 +548,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
struct reada_extctl *rec;
/* takes one ref */
re = reada_find_extent(fs_info->tree_root, logical, top);
re = reada_find_extent(fs_info, logical, top);
if (!re)
return -1;
@ -705,8 +704,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
logical = re->logical;
atomic_inc(&dev->reada_in_flight);
ret = reada_tree_block_flagged(fs_info->extent_root, logical,
mirror_num, &eb);
ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
if (ret)
__readahead_hook(fs_info, re, NULL, ret);
else if (eb)


@ -1734,7 +1734,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
dirty = 1;
key.offset -= btrfs_file_extent_offset(leaf, fi);
ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
num_bytes, parent,
btrfs_header_owner(leaf),
key.objectid, key.offset);
@ -1743,7 +1743,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
break;
}
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
parent, btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
@ -1868,7 +1868,7 @@ again:
break;
}
eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
eb = read_tree_block(fs_info, old_bytenr, old_ptr_gen);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
break;
@ -1944,21 +1944,21 @@ again:
path->slots[level], old_ptr_gen);
btrfs_mark_buffer_dirty(path->nodes[level]);
ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
path->nodes[level]->start,
ret = btrfs_inc_extent_ref(trans, fs_info, old_bytenr,
blocksize, path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
0, dest->root_key.objectid, level - 1,
0);
ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
blocksize, 0, dest->root_key.objectid,
level - 1, 0);
BUG_ON(ret);
ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize,
path->nodes[level]->start,
src->root_key.objectid, level - 1, 0);
BUG_ON(ret);
ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
ret = btrfs_free_extent(trans, fs_info, old_bytenr, blocksize,
0, dest->root_key.objectid, level - 1,
0);
BUG_ON(ret);
@ -2017,6 +2017,7 @@ static noinline_for_stack
int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
int *level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *eb = NULL;
int i;
u64 bytenr;
@ -2047,7 +2048,7 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,
}
bytenr = btrfs_node_blockptr(eb, path->slots[i]);
eb = read_tree_block(root, bytenr, ptr_gen);
eb = read_tree_block(fs_info, bytenr, ptr_gen);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
@ -2272,7 +2273,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
btrfs_end_transaction_throttle(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
@ -2302,7 +2303,7 @@ out:
if (trans)
btrfs_end_transaction_throttle(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
if (replaced && rc->stage == UPDATE_DATA_PTRS)
invalidate_extent_cache(root, &key, &next_key);
@ -2338,16 +2339,16 @@ again:
trans = btrfs_join_transaction(rc->extent_root);
if (IS_ERR(trans)) {
if (!err)
btrfs_block_rsv_release(rc->extent_root,
rc->block_rsv, num_bytes);
btrfs_block_rsv_release(fs_info, rc->block_rsv,
num_bytes);
return PTR_ERR(trans);
}
if (!err) {
if (num_bytes != rc->merging_rsv_size) {
btrfs_end_transaction(trans, rc->extent_root);
btrfs_block_rsv_release(rc->extent_root,
rc->block_rsv, num_bytes);
btrfs_block_rsv_release(fs_info, rc->block_rsv,
num_bytes);
goto again;
}
}
@ -2698,6 +2699,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
struct btrfs_key *key,
struct btrfs_path *path, int lowest)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *upper;
struct backref_edge *edge;
struct backref_edge *edges[BTRFS_MAX_LEVEL - 1];
@ -2780,7 +2782,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
blocksize = root->fs_info->nodesize;
generation = btrfs_node_ptr_generation(upper->eb, slot);
eb = read_tree_block(root, bytenr, generation);
eb = read_tree_block(fs_info, bytenr, generation);
if (IS_ERR(eb)) {
err = PTR_ERR(eb);
goto next;
@ -2809,7 +2811,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
trans->transid);
btrfs_mark_buffer_dirty(upper->eb);
ret = btrfs_inc_extent_ref(trans, root,
ret = btrfs_inc_extent_ref(trans, root->fs_info,
node->eb->start, blocksize,
upper->eb->start,
btrfs_header_owner(upper->eb),
@ -2939,14 +2941,13 @@ static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
return 0;
}
static int get_tree_block_key(struct reloc_control *rc,
static int get_tree_block_key(struct btrfs_fs_info *fs_info,
struct tree_block *block)
{
struct extent_buffer *eb;
BUG_ON(block->key_ready);
eb = read_tree_block(rc->extent_root, block->bytenr,
block->key.offset);
eb = read_tree_block(fs_info, block->bytenr, block->key.offset);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
@ -3025,6 +3026,7 @@ static noinline_for_stack
int relocate_tree_blocks(struct btrfs_trans_handle *trans,
struct reloc_control *rc, struct rb_root *blocks)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct backref_node *node;
struct btrfs_path *path;
struct tree_block *block;
@ -3042,7 +3044,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready)
readahead_tree_block(rc->extent_root, block->bytenr);
readahead_tree_block(fs_info, block->bytenr);
rb_node = rb_next(rb_node);
}
@ -3050,7 +3052,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
while (rb_node) {
block = rb_entry(rb_node, struct tree_block, rb_node);
if (!block->key_ready) {
err = get_tree_block_key(rc, block);
err = get_tree_block_key(fs_info, block);
if (err)
goto out_free_path;
}
@ -3178,6 +3180,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
static int relocate_file_extent_cluster(struct inode *inode,
struct file_extent_cluster *cluster)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 page_start;
u64 page_end;
u64 offset = BTRFS_I(inode)->index_cnt;
@ -3273,7 +3276,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
index++;
balance_dirty_pages_ratelimited(inode->i_mapping);
btrfs_throttle(BTRFS_I(inode)->root);
btrfs_throttle(fs_info);
}
WARN_ON(nr != cluster->nr);
out:
@ -3502,7 +3505,7 @@ static int block_use_full_backref(struct reloc_control *rc,
btrfs_header_backref_rev(eb) < BTRFS_MIXED_BACKREF_REV)
return 1;
ret = btrfs_lookup_extent_info(NULL, rc->extent_root,
ret = btrfs_lookup_extent_info(NULL, rc->extent_root->fs_info,
eb->start, btrfs_header_level(eb), 1,
NULL, &flags);
BUG_ON(ret);
@ -3539,7 +3542,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
}
truncate:
ret = btrfs_check_trunc_cache_free_space(root,
ret = btrfs_check_trunc_cache_free_space(fs_info,
&fs_info->global_block_rsv);
if (ret)
goto out;
@ -3553,7 +3556,7 @@ truncate:
ret = btrfs_truncate_free_space_cache(root, trans, block_group, inode);
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
out:
iput(inode);
return ret;
@ -3951,7 +3954,7 @@ int prepare_to_relocate(struct reloc_control *rc)
struct btrfs_trans_handle *trans;
int ret;
rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root->fs_info,
BTRFS_BLOCK_RSV_TEMP);
if (!rc->block_rsv)
return -ENOMEM;
@ -3989,6 +3992,7 @@ int prepare_to_relocate(struct reloc_control *rc)
static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
{
struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
struct rb_root blocks = RB_ROOT;
struct btrfs_key key;
struct btrfs_trans_handle *trans = NULL;
@ -4118,7 +4122,7 @@ restart:
}
btrfs_end_transaction_throttle(trans, rc->extent_root);
btrfs_btree_balance_dirty(rc->extent_root);
btrfs_btree_balance_dirty(fs_info);
trans = NULL;
if (rc->stage == MOVE_DATA_EXTENTS &&
@ -4133,7 +4137,7 @@ restart:
}
}
if (trans && progress && err == -ENOSPC) {
ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
ret = btrfs_force_chunk_alloc(trans, fs_info,
rc->block_group->flags);
if (ret == 1) {
err = 0;
@ -4147,7 +4151,7 @@ restart:
if (trans) {
btrfs_end_transaction_throttle(trans, rc->extent_root);
btrfs_btree_balance_dirty(rc->extent_root);
btrfs_btree_balance_dirty(fs_info);
}
if (!err) {
@ -4161,7 +4165,7 @@ restart:
set_reloc_control(rc);
backref_cache_cleanup(&rc->backref_cache);
btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
err = prepare_to_merge(rc, err);
@ -4169,7 +4173,7 @@ restart:
rc->merge_reloc_tree = 0;
unset_reloc_control(rc);
btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
btrfs_block_rsv_release(fs_info, rc->block_rsv, (u64)-1);
/* get rid of pinned extents */
trans = btrfs_join_transaction(rc->extent_root);
@ -4179,7 +4183,7 @@ restart:
}
btrfs_commit_transaction(trans, rc->extent_root);
out_free:
btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
btrfs_free_block_rsv(fs_info, rc->block_rsv);
btrfs_free_path(path);
return err;
}
@ -4254,7 +4258,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
err = btrfs_orphan_add(trans, inode);
out:
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_btree_balance_dirty(fs_info);
if (err) {
if (inode)
iput(inode);
@ -4414,7 +4418,7 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
WARN_ON(btrfs_block_group_used(&rc->block_group->item) > 0);
out:
if (err && rw)
btrfs_dec_block_group_ro(extent_root, rc->block_group);
btrfs_dec_block_group_ro(rc->block_group);
iput(rc->data_inode);
btrfs_put_block_group(rc->block_group);
kfree(rc);
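
Several of the relocation helpers above derive their fs_info from the reloc_control rather than from a root (the data paths use btrfs_sb(inode->i_sb) instead). A minimal sketch of that lookup, with an illustrative helper name:

static inline struct btrfs_fs_info *reloc_fs_info(struct reloc_control *rc)
{
	/* same pattern as do_relocation() and relocate_tree_blocks() above */
	return rc->extent_root->fs_info;
}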


@ -151,7 +151,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
}
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
btrfs_print_leaf(fs_info, path->nodes[0]);
btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
key->objectid, key->type, key->offset);
BUG_ON(1);


@ -1450,7 +1450,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
bio->bi_private = &done;
bio->bi_end_io = scrub_bio_wait_endio;
ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
page->recover->map_length,
page->mirror_num, 0);
if (ret)
@ -2181,7 +2181,6 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *dev_root = fs_info->dev_root;
u64 length = sblock->page_count * PAGE_SIZE;
u64 logical = sblock->pagev[0]->logical;
struct btrfs_bio *bbio = NULL;
@ -2214,7 +2213,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
bio->bi_private = sblock;
bio->bi_end_io = scrub_missing_raid56_end_io;
rbio = raid56_alloc_missing_rbio(dev_root, bio, bbio, length);
rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
if (!rbio)
goto rbio_out;
@ -2766,7 +2765,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
struct scrub_ctx *sctx = sparity->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *dev_root = fs_info->dev_root;
struct bio *bio;
struct btrfs_raid_bio *rbio;
struct scrub_page *spage;
@ -2792,7 +2790,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
bio->bi_private = sparity;
bio->bi_end_io = scrub_parity_bio_endio;
rbio = raid56_parity_alloc_scrub_rbio(dev_root, bio, bbio,
rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
length, sparity->scrub_dev,
sparity->dbitmap,
sparity->nsectors);
@ -3694,7 +3692,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
if (ro_set)
btrfs_dec_block_group_ro(root, cache);
btrfs_dec_block_group_ro(cache);
/*
* We might have prevented the cleaner kthread from deleting
@ -3980,10 +3978,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return ret;
}
void btrfs_scrub_pause(struct btrfs_root *root)
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
mutex_lock(&fs_info->scrub_lock);
atomic_inc(&fs_info->scrub_pause_req);
while (atomic_read(&fs_info->scrubs_paused) !=
@ -3997,10 +3993,8 @@ void btrfs_scrub_pause(struct btrfs_root *root)
mutex_unlock(&fs_info->scrub_lock);
}
void btrfs_scrub_continue(struct btrfs_root *root)
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
atomic_dec(&fs_info->scrub_pause_req);
wake_up(&fs_info->scrub_pause_wait);
}
@ -4049,10 +4043,9 @@ int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
return 0;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *dev;
struct scrub_ctx *sctx = NULL;


@ -3435,6 +3435,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
struct recorded_ref *parent_ref,
const bool is_orphan)
{
struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key di_key;
@ -3463,8 +3464,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
goto out;
}
di = btrfs_match_dir_item_name(sctx->parent_root, path,
parent_ref->name, parent_ref->name_len);
di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
parent_ref->name_len);
if (!di) {
ret = 0;
goto out;

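btrfs_match_dir_item_name() now wants the fs_info rather than a root, here and in the xattr code near the end of this commit. A sketch, not part of the commit, of how a caller that only holds a root adapts; example_find_dir_item() is a made-up wrapper.

static struct btrfs_dir_item *example_find_dir_item(struct btrfs_root *root,
						    struct btrfs_path *path,
						    const char *name,
						    int name_len)
{
	/* Derive the fs_info once and hand it to the converted helper. */
	struct btrfs_fs_info *fs_info = root->fs_info;

	return btrfs_match_dir_item_name(fs_info, path, name, name_len);
}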
View file

@ -396,10 +396,9 @@ static const match_table_t tokens = {
* reading in a new superblock is parsed here.
* XXX JDM: This needs to be cleaned up for remount.
*/
int btrfs_parse_options(struct btrfs_root *root, char *options,
int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags)
{
struct btrfs_fs_info *info = root->fs_info;
substring_t args[MAX_OPT_ARGS];
char *p, *num, *orig = NULL;
u64 cache_gen;
@ -1733,7 +1732,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
}
}
ret = btrfs_parse_options(root, data, *flags);
ret = btrfs_parse_options(fs_info, data, *flags);
if (ret) {
ret = -EINVAL;
goto restore;

View file

@ -184,10 +184,10 @@ static inline int extwriter_counter_read(struct btrfs_transaction *trans)
/*
* either allocate a new transaction or hop into the existing one
*/
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
static noinline int join_transaction(struct btrfs_fs_info *fs_info,
unsigned int type)
{
struct btrfs_transaction *cur_trans;
struct btrfs_fs_info *fs_info = root->fs_info;
spin_lock(&fs_info->trans_lock);
loop:
@ -425,9 +425,8 @@ static inline int is_transaction_blocked(struct btrfs_transaction *trans)
* when this is done, it is safe to start a new transaction, but the current
* transaction might not be fully on disk.
*/
static void wait_current_trans(struct btrfs_root *root)
static void wait_current_trans(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans;
spin_lock(&fs_info->trans_lock);
@ -445,10 +444,8 @@ static void wait_current_trans(struct btrfs_root *root)
}
}
static int may_wait_transaction(struct btrfs_root *root, int type)
static int may_wait_transaction(struct btrfs_fs_info *fs_info, int type)
{
struct btrfs_fs_info *fs_info = root->fs_info;
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return 0;
@ -548,13 +545,13 @@ again:
if (type & __TRANS_FREEZABLE)
sb_start_intwrite(fs_info->sb);
if (may_wait_transaction(root, type))
wait_current_trans(root);
if (may_wait_transaction(fs_info, type))
wait_current_trans(fs_info);
do {
ret = join_transaction(root, type);
ret = join_transaction(fs_info, type);
if (ret == -EBUSY) {
wait_current_trans(root);
wait_current_trans(fs_info);
if (unlikely(type == TRANS_ATTACH))
ret = -ENOENT;
}
@ -578,7 +575,7 @@ again:
smp_mb();
if (cur_trans->state >= TRANS_STATE_BLOCKED &&
may_wait_transaction(root, type)) {
may_wait_transaction(fs_info, type)) {
current->journal_info = h;
btrfs_commit_transaction(h, root);
goto again;
@ -605,7 +602,7 @@ join_fail:
kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
if (num_bytes)
btrfs_block_rsv_release(root, &fs_info->trans_block_rsv,
btrfs_block_rsv_release(fs_info, &fs_info->trans_block_rsv,
num_bytes);
reserve_fail:
btrfs_qgroup_free_meta(root, qgroup_reserved);
@ -712,21 +709,19 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
trans = start_transaction(root, 0, TRANS_ATTACH,
BTRFS_RESERVE_NO_FLUSH);
if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
btrfs_wait_for_commit(root, 0);
btrfs_wait_for_commit(root->fs_info, 0);
return trans;
}
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit)
static noinline void wait_for_commit(struct btrfs_transaction *commit)
{
wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = NULL, *t;
int ret = 0;
@ -777,35 +772,33 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
goto out; /* nothing committing|committed */
}
wait_for_commit(root, cur_trans);
wait_for_commit(cur_trans);
btrfs_put_transaction(cur_trans);
out:
return ret;
}
void btrfs_throttle(struct btrfs_root *root)
void btrfs_throttle(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
if (!atomic_read(&fs_info->open_ioctl_trans))
wait_current_trans(root);
wait_current_trans(fs_info);
}
static int should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
static int should_end_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_info *fs_info = trans->fs_info;
if (fs_info->global_block_rsv.space_info->full &&
btrfs_check_space_for_delayed_refs(trans, root))
btrfs_check_space_for_delayed_refs(trans, fs_info))
return 1;
return !!btrfs_block_rsv_check(root, &fs_info->global_block_rsv, 5);
return !!btrfs_block_rsv_check(&fs_info->global_block_rsv, 5);
}
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_transaction *cur_trans = trans->transaction;
int updates;
int err;
@ -818,12 +811,12 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
updates = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (updates) {
err = btrfs_run_delayed_refs(trans, root, updates * 2);
err = btrfs_run_delayed_refs(trans, fs_info, updates * 2);
if (err) /* Error code will also eval true */
return err;
}
return should_end_transaction(trans, root);
return should_end_transaction(trans);
}
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
@ -843,16 +836,16 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
return 0;
}
btrfs_trans_release_metadata(trans, root);
btrfs_trans_release_metadata(trans, info);
trans->block_rsv = NULL;
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
btrfs_create_pending_block_groups(trans, info);
trans->delayed_ref_updates = 0;
if (!trans->sync) {
must_run_delayed_refs =
btrfs_should_throttle_delayed_refs(trans, root);
btrfs_should_throttle_delayed_refs(trans, info);
cur = max_t(unsigned long, cur, 32);
/*
@ -864,16 +857,16 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
must_run_delayed_refs = 2;
}
btrfs_trans_release_metadata(trans, root);
btrfs_trans_release_metadata(trans, info);
trans->block_rsv = NULL;
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
btrfs_create_pending_block_groups(trans, info);
btrfs_trans_release_chunk_metadata(trans);
if (lock && !atomic_read(&info->open_ioctl_trans) &&
should_end_transaction(trans, root) &&
should_end_transaction(trans) &&
ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
spin_lock(&info->trans_lock);
if (cur_trans->state == TRANS_STATE_RUNNING)
@ -908,7 +901,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
current->journal_info = NULL;
if (throttle)
btrfs_run_delayed_iputs(root);
btrfs_run_delayed_iputs(info);
if (trans->aborted ||
test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
@ -919,7 +912,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (must_run_delayed_refs) {
btrfs_async_run_delayed_refs(root, cur, transid,
btrfs_async_run_delayed_refs(info, cur, transid,
must_run_delayed_refs == 1);
}
return err;
@ -942,12 +935,11 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
* them in one of two extent_io trees. This is used to make sure all of
* those extents are sent to disk but does not wait on them
*/
int btrfs_write_marked_extents(struct btrfs_root *root,
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark)
{
int err = 0;
int werr = 0;
struct btrfs_fs_info *fs_info = root->fs_info;
struct address_space *mapping = fs_info->btree_inode->i_mapping;
struct extent_state *cached_state = NULL;
u64 start = 0;
@ -1068,7 +1060,7 @@ static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
struct blk_plug plug;
blk_start_plug(&plug);
ret = btrfs_write_marked_extents(root, dirty_pages, mark);
ret = btrfs_write_marked_extents(root->fs_info, dirty_pages, mark);
blk_finish_plug(&plug);
ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
@ -1080,7 +1072,7 @@ static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
}
static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
struct btrfs_root *root)
{
int ret;
@ -1140,9 +1132,8 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
* to clean up the delayed refs.
*/
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *dirty_bgs = &trans->transaction->dirty_bgs;
struct list_head *io_bgs = &trans->transaction->io_bgs;
struct list_head *next;
@ -1158,7 +1149,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
if (ret)
return ret;
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
@ -1172,16 +1163,17 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
if (ret)
return ret;
ret = btrfs_setup_space_cache(trans, root);
ret = btrfs_setup_space_cache(trans, fs_info);
if (ret)
return ret;
/* run_qgroups might have added some more refs */
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
again:
while (!list_empty(&fs_info->dirty_cowonly_roots)) {
struct btrfs_root *root;
next = fs_info->dirty_cowonly_roots.next;
list_del_init(next);
root = list_entry(next, struct btrfs_root, dirty_list);
@ -1193,16 +1185,16 @@ again:
ret = update_cowonly_root(trans, root);
if (ret)
return ret;
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
}
while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
ret = btrfs_write_dirty_block_groups(trans, root);
ret = btrfs_write_dirty_block_groups(trans, fs_info);
if (ret)
return ret;
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret)
return ret;
}
@ -1309,7 +1301,7 @@ int btrfs_defrag_root(struct btrfs_root *root)
ret = btrfs_defrag_leaves(trans, root);
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(info->tree_root);
btrfs_btree_balance_dirty(info);
cond_resched();
if (btrfs_fs_closing(info) || ret != -EAGAIN)
@ -1388,7 +1380,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
* like chunk and root tree, as they won't affect qgroup.
* And we don't write super to avoid half committed status.
*/
ret = commit_cowonly_roots(trans, src);
ret = commit_cowonly_roots(trans, fs_info);
if (ret)
goto out;
switch_commit_roots(trans->transaction, fs_info);
@ -1515,7 +1507,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
* otherwise we corrupt the FS during
* snapshot
*/
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
if (ret) { /* Transaction aborted */
btrfs_abort_transaction(trans, ret);
goto fail;
@ -1611,7 +1603,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto fail;
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@ -1665,7 +1657,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
}
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
btrfs_abort_transaction(trans, ret);
goto fail;
@ -1706,9 +1698,8 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
return ret;
}
static void update_super_roots(struct btrfs_root *root)
static void update_super_roots(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root_item *root_item;
struct btrfs_super_block *super;
@ -1759,24 +1750,23 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
* wait for the current transaction commit to start and block subsequent
* transaction joins
*/
static void wait_current_trans_commit_start(struct btrfs_root *root,
static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *trans)
{
wait_event(root->fs_info->transaction_blocked_wait,
trans->state >= TRANS_STATE_COMMIT_START ||
trans->aborted);
wait_event(fs_info->transaction_blocked_wait,
trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
}
/*
* wait for the current transaction to start and then become unblocked.
* caller holds ref.
*/
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_transaction *trans)
static void wait_current_trans_commit_start_and_unblock(
struct btrfs_fs_info *fs_info,
struct btrfs_transaction *trans)
{
wait_event(root->fs_info->transaction_wait,
trans->state >= TRANS_STATE_UNBLOCKED ||
trans->aborted);
wait_event(fs_info->transaction_wait,
trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
}
/*
@ -1845,9 +1835,9 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
/* wait for transaction to start and unblock */
if (wait_for_unblock)
wait_current_trans_commit_start_and_unblock(root, cur_trans);
wait_current_trans_commit_start_and_unblock(fs_info, cur_trans);
else
wait_current_trans_commit_start(root, cur_trans);
wait_current_trans_commit_start(fs_info, cur_trans);
if (current->journal_info == trans)
current->journal_info = NULL;
@ -1888,7 +1878,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
}
spin_unlock(&fs_info->trans_lock);
btrfs_cleanup_one_transaction(trans->transaction, root);
btrfs_cleanup_one_transaction(trans->transaction, fs_info);
spin_lock(&fs_info->trans_lock);
if (cur_trans == fs_info->running_transaction)
@ -1947,13 +1937,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
/* make a pass through all the delayed refs we have so far
* any runnings procs may add more while we are here
*/
ret = btrfs_run_delayed_refs(trans, root, 0);
ret = btrfs_run_delayed_refs(trans, fs_info, 0);
if (ret) {
btrfs_end_transaction(trans, root);
return ret;
}
btrfs_trans_release_metadata(trans, root);
btrfs_trans_release_metadata(trans, fs_info);
trans->block_rsv = NULL;
cur_trans = trans->transaction;
@ -1966,9 +1956,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
smp_wmb();
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
btrfs_create_pending_block_groups(trans, fs_info);
ret = btrfs_run_delayed_refs(trans, root, 0);
ret = btrfs_run_delayed_refs(trans, fs_info, 0);
if (ret) {
btrfs_end_transaction(trans, root);
return ret;
@ -1997,7 +1987,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
mutex_unlock(&fs_info->ro_block_group_mutex);
if (run_it)
ret = btrfs_start_dirty_block_groups(trans, root);
ret = btrfs_start_dirty_block_groups(trans, fs_info);
}
if (ret) {
btrfs_end_transaction(trans, root);
@ -2010,7 +2000,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
atomic_inc(&cur_trans->use_count);
ret = btrfs_end_transaction(trans, root);
wait_for_commit(root, cur_trans);
wait_for_commit(cur_trans);
if (unlikely(cur_trans->aborted))
ret = cur_trans->aborted;
@ -2030,7 +2020,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
atomic_inc(&prev_trans->use_count);
spin_unlock(&fs_info->trans_lock);
wait_for_commit(root, prev_trans);
wait_for_commit(prev_trans);
ret = prev_trans->aborted;
btrfs_put_transaction(prev_trans);
@ -2049,7 +2039,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
if (ret)
goto cleanup_transaction;
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
goto cleanup_transaction;
@ -2057,7 +2047,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
extwriter_counter_read(cur_trans) == 0);
/* some pending stuffs might be added after the previous flush. */
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
goto cleanup_transaction;
@ -2065,7 +2055,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_wait_pending_ordered(cur_trans);
btrfs_scrub_pause(root);
btrfs_scrub_pause(fs_info);
/*
* Ok now we need to make sure to block out any other joins while we
* commit the transaction. We could have started a join before setting
@ -2110,13 +2100,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* because all the tree which are snapshoted will be forced to COW
* the nodes and leaves.
*/
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
if (ret) {
mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
ret = btrfs_run_delayed_refs(trans, fs_info, (unsigned long)-1);
if (ret) {
mutex_unlock(&fs_info->reloc_mutex);
goto scrub_continue;
@ -2181,7 +2171,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
goto scrub_continue;
}
ret = commit_cowonly_roots(trans, root);
ret = commit_cowonly_roots(trans, fs_info);
if (ret) {
mutex_unlock(&fs_info->tree_log_mutex);
mutex_unlock(&fs_info->reloc_mutex);
@ -2199,7 +2189,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
goto scrub_continue;
}
btrfs_prepare_extent_commit(trans, root);
btrfs_prepare_extent_commit(trans, fs_info);
cur_trans = fs_info->running_transaction;
@ -2218,7 +2208,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
assert_qgroups_uptodate(trans);
ASSERT(list_empty(&cur_trans->dirty_bgs));
ASSERT(list_empty(&cur_trans->io_bgs));
update_super_roots(root);
update_super_roots(fs_info);
btrfs_set_super_log_root(fs_info->super_copy, 0);
btrfs_set_super_log_root_level(fs_info->super_copy, 0);
@ -2226,7 +2216,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
sizeof(*fs_info->super_copy));
btrfs_update_commit_device_size(fs_info);
btrfs_update_commit_device_bytes_used(root, cur_trans);
btrfs_update_commit_device_bytes_used(fs_info, cur_trans);
clear_bit(BTRFS_FS_LOG1_ERR, &fs_info->flags);
clear_bit(BTRFS_FS_LOG2_ERR, &fs_info->flags);
@ -2249,7 +2239,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
goto scrub_continue;
}
ret = write_ctree_super(trans, root, 0);
ret = write_ctree_super(trans, fs_info, 0);
if (ret) {
mutex_unlock(&fs_info->tree_log_mutex);
goto scrub_continue;
@ -2261,7 +2251,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/
mutex_unlock(&fs_info->tree_log_mutex);
btrfs_finish_extent_commit(trans, root);
btrfs_finish_extent_commit(trans, fs_info);
if (test_bit(BTRFS_TRANS_HAVE_FREE_BGS, &cur_trans->flags))
btrfs_clear_space_info_full(fs_info);
@ -2286,7 +2276,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
trace_btrfs_transaction_commit(root);
btrfs_scrub_continue(root);
btrfs_scrub_continue(fs_info);
if (current->journal_info == trans)
current->journal_info = NULL;
@ -2299,14 +2289,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
*/
if (current != fs_info->transaction_kthread &&
current != fs_info->cleaner_kthread && !fs_info->fs_frozen)
btrfs_run_delayed_iputs(root);
btrfs_run_delayed_iputs(fs_info);
return ret;
scrub_continue:
btrfs_scrub_continue(root);
btrfs_scrub_continue(fs_info);
cleanup_transaction:
btrfs_trans_release_metadata(trans, root);
btrfs_trans_release_metadata(trans, fs_info);
btrfs_trans_release_chunk_metadata(trans);
trans->block_rsv = NULL;
btrfs_warn(fs_info, "Skipping commit of aborted transaction.");

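Several of the transaction helpers above (btrfs_wait_for_commit(), btrfs_throttle(), btrfs_run_delayed_iputs()) are now keyed on the fs_info. A minimal sketch, not part of the commit, of a caller that still holds only a root and reaches the fs_info the same way btrfs_attach_transaction_barrier() does above; example_wait_for_transid() is a made-up name.

static int example_wait_for_transid(struct btrfs_root *root, u64 transid)
{
	/* The root is only needed to reach the fs_info now. */
	return btrfs_wait_for_commit(root->fs_info, transid);
}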
View file

@ -202,7 +202,7 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
@ -216,10 +216,10 @@ int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_throttle(struct btrfs_root *root);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_root *root,
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, int mark);

View file

@ -307,12 +307,12 @@ static int process_one_buffer(struct btrfs_root *log,
}
if (wc->pin)
ret = btrfs_pin_extent_for_log_replay(fs_info->extent_root,
eb->start, eb->len);
ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
eb->len);
if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
if (wc->pin && btrfs_header_level(eb) == 0)
ret = btrfs_exclude_logged_extents(log, eb);
ret = btrfs_exclude_logged_extents(fs_info, eb);
if (wc->write)
btrfs_write_tree_block(eb);
if (wc->wait)
@ -341,6 +341,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
u32 item_size;
u64 saved_i_size = 0;
@ -461,9 +462,9 @@ insert:
found_size = btrfs_item_size_nr(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
btrfs_truncate_item(root, path, item_size, 1);
btrfs_truncate_item(fs_info, path, item_size, 1);
else if (found_size < item_size)
btrfs_extend_item(root, path,
btrfs_extend_item(fs_info, path,
item_size - found_size);
} else if (ret) {
return ret;
@ -708,10 +709,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* is this extent already allocated in the extent
* allocation tree? If so, just add a reference
*/
ret = btrfs_lookup_data_extent(root, ins.objectid,
ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
ins.offset);
if (ret == 0) {
ret = btrfs_inc_extent_ref(trans, root,
ret = btrfs_inc_extent_ref(trans, fs_info,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset);
@ -723,7 +724,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* allocation tree
*/
ret = btrfs_alloc_logged_file_extent(trans,
root, root->root_key.objectid,
fs_info,
root->root_key.objectid,
key->objectid, offset, &ins);
if (ret)
goto out;
@ -843,6 +845,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
struct inode *dir,
struct btrfs_dir_item *di)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode;
char *name;
int name_len;
@ -875,7 +878,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
if (ret)
goto out;
else
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
out:
kfree(name);
iput(inode);
@ -993,6 +996,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
u64 ref_index, char *name, int namelen,
int *search_done)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *victim_name;
int victim_name_len;
@ -1051,7 +1055,7 @@ again:
kfree(victim_name);
if (ret)
return ret;
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
return ret;
*search_done = 1;
@ -1122,7 +1126,8 @@ again:
victim_name_len);
if (!ret)
ret = btrfs_run_delayed_items(
trans, root);
trans,
fs_info);
}
iput(victim_parent);
kfree(victim_name);
@ -1813,6 +1818,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u32 item_size = btrfs_item_size_nr(eb, slot);
struct btrfs_dir_item *di;
@ -1825,7 +1831,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
if (verify_dir_item(root, eb, di))
if (verify_dir_item(fs_info, eb, di))
return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
@ -1980,6 +1986,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
struct inode *dir,
struct btrfs_key *dir_key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct extent_buffer *eb;
int slot;
@ -2001,7 +2008,7 @@ again:
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
if (verify_dir_item(root, eb, di)) {
if (verify_dir_item(fs_info, eb, di)) {
ret = -EIO;
goto out;
}
@ -2048,7 +2055,7 @@ again:
ret = btrfs_unlink_inode(trans, root, dir, inode,
name, name_len);
if (!ret)
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
kfree(name);
iput(inode);
if (ret)
@ -2440,7 +2447,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent);
next = btrfs_find_create_tree_block(root, bytenr);
next = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(next))
return PTR_ERR(next);
@ -2469,8 +2476,9 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
WARN_ON(root_owner !=
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
bytenr, blocksize);
ret = btrfs_free_and_pin_reserved_extent(
fs_info, bytenr,
blocksize);
if (ret) {
free_extent_buffer(next);
return ret;
@ -2547,7 +2555,8 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
}
WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
ret = btrfs_free_and_pin_reserved_extent(
fs_info,
path->nodes[*level]->start,
path->nodes[*level]->len);
if (ret)
@ -2569,6 +2578,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
static int walk_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *log, struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
int wret;
int level;
@ -2617,15 +2627,15 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
clean_tree_block(trans, log->fs_info, next);
clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(log->root_key.objectid !=
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(log, next->start,
next->len);
ret = btrfs_free_and_pin_reserved_extent(fs_info,
next->start, next->len);
if (ret)
goto out;
}
@ -2803,7 +2813,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* wait for them until later.
*/
blk_start_plug(&plug);
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret);
@ -2911,7 +2921,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out_wake_log_root;
}
ret = btrfs_write_marked_extents(log_root_tree,
ret = btrfs_write_marked_extents(fs_info,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
blk_finish_plug(&plug);
@ -2950,7 +2960,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* the running transaction open, so a full commit can't hop
* in and cause problems either.
*/
ret = write_ctree_super(trans, fs_info->tree_root, 1);
ret = write_ctree_super(trans, fs_info, 1);
if (ret) {
btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);

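The tree-log changes above repeat one shape: a function that used to forward its root now fetches the fs_info once at the top and passes that to the converted helpers. A sketch of that pattern, not part of the commit; example_replay_step() is a made-up name, while btrfs_run_delayed_items(trans, fs_info) is taken from the hunks above.

static int example_replay_step(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	/* Helpers that only needed the root for its fs_info take it directly. */
	return btrfs_run_delayed_items(trans, fs_info);
}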
View file

@ -133,7 +133,7 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
* An item with that type already exists.
* Extend the item and store the new subid at the end.
*/
btrfs_extend_item(uuid_root, path, sizeof(subid_le));
btrfs_extend_item(fs_info, path, sizeof(subid_le));
eb = path->nodes[0];
slot = path->slots[0];
offset = btrfs_item_ptr_offset(eb, slot);
@ -231,7 +231,7 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
move_src = offset + sizeof(subid);
move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
memmove_extent_buffer(eb, move_dst, move_src, move_len);
btrfs_truncate_item(uuid_root, path, item_size - sizeof(subid), 1);
btrfs_truncate_item(fs_info, path, item_size - sizeof(subid), 1);
out:
btrfs_free_path(path);

View file

@ -134,9 +134,9 @@ const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = {
};
static int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct btrfs_device *device);
static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
@ -1852,9 +1852,8 @@ void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
fs_info->fs_devices->latest_bdev = next_device->bdev;
}
int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
int btrfs_rm_device(struct btrfs_fs_info *fs_info, char *device_path, u64 devid)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *device;
struct btrfs_fs_devices *cur_devices;
u64 num_devices;
@ -1875,8 +1874,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid)
if (ret)
goto out;
ret = btrfs_find_device_by_devspec(root, devid, device_path,
&device);
ret = btrfs_find_device_by_devspec(fs_info, devid, device_path,
&device);
if (ret)
goto out;
@ -2092,10 +2091,10 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
call_rcu(&tgtdev->rcu, free_device);
}
static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device **device)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
struct btrfs_super_block *disk_super;
u64 devid;
@ -2119,12 +2118,10 @@ static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path,
return ret;
}
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device **device)
{
struct btrfs_fs_info *fs_info = root->fs_info;
*device = NULL;
if (strcmp(device_path, "missing") == 0) {
struct list_head *devices;
@ -2147,18 +2144,16 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
return 0;
} else {
return btrfs_find_device_by_path(root, device_path, device);
return btrfs_find_device_by_path(fs_info, device_path, device);
}
}
/*
* Lookup a device given by device id, or the path if the id is 0.
*/
int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
char *devpath,
struct btrfs_device **device)
int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
char *devpath, struct btrfs_device **device)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
if (devid) {
@ -2170,7 +2165,7 @@ int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
if (!devpath || !devpath[0])
return -EINVAL;
ret = btrfs_find_device_missing_or_by_path(root, devpath,
ret = btrfs_find_device_missing_or_by_path(fs_info, devpath,
device);
}
return ret;
@ -2179,9 +2174,8 @@ int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
/*
* does all the dirty work required for changing file system's UUID.
*/
static int btrfs_prepare_sprout(struct btrfs_root *root)
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_fs_devices *old_devices;
struct btrfs_fs_devices *seed_devices;
@ -2401,7 +2395,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path)
if (seeding_dev) {
sb->s_flags &= ~MS_RDONLY;
ret = btrfs_prepare_sprout(root);
ret = btrfs_prepare_sprout(fs_info);
BUG_ON(ret); /* -ENOMEM */
}
@ -2446,7 +2440,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path)
if (seeding_dev) {
lock_chunks(fs_info);
ret = init_first_rw_device(trans, root, device);
ret = init_first_rw_device(trans, fs_info, device);
unlock_chunks(fs_info);
if (ret) {
btrfs_abort_transaction(trans, ret);
@ -2490,7 +2484,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path)
if (ret) /* transaction commit */
return ret;
ret = btrfs_relocate_sys_chunks(root);
ret = btrfs_relocate_sys_chunks(fs_info);
if (ret < 0)
btrfs_handle_fs_error(fs_info, ret,
"Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
@ -2521,14 +2515,14 @@ error:
return ret;
}
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device *srcdev,
struct btrfs_device **device_out)
{
struct request_queue *q;
struct btrfs_device *device;
struct block_device *bdev;
struct btrfs_fs_info *fs_info = root->fs_info;
struct list_head *devices;
struct rcu_string *name;
u64 devid = BTRFS_DEV_REPLACE_DEVID;
@ -2805,7 +2799,6 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
{
struct extent_map_tree *em_tree;
struct extent_map *em;
struct btrfs_root *extent_root = fs_info->extent_root;
struct map_lookup *map;
u64 dev_extent_len = 0;
u64 chunk_objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
@ -2832,7 +2825,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
}
map = em->map_lookup;
lock_chunks(fs_info);
check_system_chunk(trans, extent_root, map->type);
check_system_chunk(trans, fs_info, map->type);
unlock_chunks(fs_info);
/*
@ -2929,9 +2922,9 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
return -ENOSPC;
/* step one, relocate all the extents inside this chunk */
btrfs_scrub_pause(root);
btrfs_scrub_pause(fs_info);
ret = btrfs_relocate_block_group(fs_info, chunk_offset);
btrfs_scrub_continue(root);
btrfs_scrub_continue(fs_info);
if (ret)
return ret;
@ -2952,9 +2945,8 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
return ret;
}
static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *chunk_root = fs_info->chunk_root;
struct btrfs_path *path;
struct extent_buffer *leaf;
@ -3369,11 +3361,10 @@ static int chunk_soft_convert_filter(u64 chunk_type,
return 0;
}
static int should_balance_chunk(struct btrfs_root *root,
static int should_balance_chunk(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
struct btrfs_balance_args *bargs = NULL;
u64 chunk_type = btrfs_chunk_type(leaf, chunk);
@ -3607,7 +3598,7 @@ again:
spin_unlock(&fs_info->balance_lock);
}
ret = should_balance_chunk(chunk_root, leaf, chunk,
ret = should_balance_chunk(fs_info, leaf, chunk,
found_key.offset);
btrfs_release_path(path);
@ -3660,7 +3651,7 @@ again:
goto error;
}
ret = btrfs_force_chunk_alloc(trans, chunk_root,
ret = btrfs_force_chunk_alloc(trans, fs_info,
BTRFS_BLOCK_GROUP_DATA);
btrfs_end_transaction(trans, chunk_root);
if (ret < 0) {
@ -4522,11 +4513,10 @@ done:
return ret;
}
static int btrfs_add_system_chunk(struct btrfs_root *root,
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
struct btrfs_key *key,
struct btrfs_chunk *chunk, int item_size)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_super_block *super_copy = fs_info->super_copy;
struct btrfs_disk_key disk_key;
u32 array_size;
@ -4595,10 +4585,10 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
/ sizeof(struct btrfs_stripe) + 1)
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 start,
struct btrfs_fs_info *fs_info, u64 start,
u64 type)
{
struct btrfs_fs_info *info = extent_root->fs_info;
struct btrfs_fs_info *info = trans->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;
struct list_head *cur;
struct map_lookup *map = NULL;
@ -4852,7 +4842,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
goto error;
}
ret = btrfs_make_block_group(trans, extent_root, 0, type,
ret = btrfs_make_block_group(trans, info, 0, type,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, num_bytes);
if (ret)
@ -4997,8 +4987,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
* TODO: Cleanup of inserted chunk root in case of
* failure.
*/
ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
item_size);
ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
}
out:
@ -5015,37 +5004,34 @@ out:
* bootstrap process of adding storage to a seed btrfs.
*/
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 type)
struct btrfs_fs_info *fs_info, u64 type)
{
struct btrfs_fs_info *fs_info = extent_root->fs_info;
u64 chunk_offset;
ASSERT(mutex_is_locked(&fs_info->chunk_mutex));
chunk_offset = find_next_chunk(fs_info);
return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
return __btrfs_alloc_chunk(trans, fs_info, chunk_offset, type);
}
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct btrfs_device *device)
{
struct btrfs_root *extent_root = fs_info->extent_root;
u64 chunk_offset;
u64 sys_chunk_offset;
u64 alloc_profile;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *extent_root = fs_info->extent_root;
int ret;
chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
alloc_profile);
ret = __btrfs_alloc_chunk(trans, fs_info, chunk_offset, alloc_profile);
if (ret)
return ret;
sys_chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
ret = __btrfs_alloc_chunk(trans, fs_info, sys_chunk_offset,
alloc_profile);
return ret;
}
@ -5068,9 +5054,8 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
return max_errors;
}
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_map *em;
struct map_lookup *map;
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
@ -5185,11 +5170,10 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
return ret;
}
unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct btrfs_mapping_tree *map_tree,
u64 logical)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_map *em;
struct map_lookup *map;
struct extent_map_tree *em_tree = &map_tree->map_tree;
@ -6075,8 +6059,7 @@ static void btrfs_end_bio(struct bio *bio)
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
static noinline void btrfs_schedule_bio(struct btrfs_device *device,
struct bio *bio)
{
struct btrfs_fs_info *fs_info = device->fs_info;
@ -6127,11 +6110,11 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
btrfs_queue_work(fs_info->submit_workers, &device->work);
}
static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
struct bio *bio, u64 physical, int dev_nr,
int async)
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
u64 physical, int dev_nr, int async)
{
struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
struct btrfs_fs_info *fs_info = bbio->fs_info;
bio->bi_private = bbio;
btrfs_io_bio(bio)->stripe_index = dev_nr;
@ -6154,10 +6137,10 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
#endif
bio->bi_bdev = dev->bdev;
btrfs_bio_counter_inc_noblocked(root->fs_info);
btrfs_bio_counter_inc_noblocked(fs_info);
if (async)
btrfs_schedule_bio(root, dev, bio);
btrfs_schedule_bio(dev, bio);
else
btrfsic_submit_bio(bio);
}
@ -6176,10 +6159,9 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
}
}
int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *dev;
struct bio *first_bio = bio;
u64 logical = (u64)bio->bi_iter.bi_sector << 9;
@ -6213,10 +6195,11 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
/* In this case, map_length has been set to the length of
a single stripe; not the whole write */
if (bio_op(bio) == REQ_OP_WRITE) {
ret = raid56_parity_write(root, bio, bbio, map_length);
ret = raid56_parity_write(fs_info, bio, bbio,
map_length);
} else {
ret = raid56_parity_recover(root, bio, bbio, map_length,
mirror_num, 1);
ret = raid56_parity_recover(fs_info, bio, bbio,
map_length, mirror_num, 1);
}
btrfs_bio_counter_dec(fs_info);
@ -6244,9 +6227,8 @@ int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
} else
bio = first_bio;
submit_stripe_bio(root, bbio, bio,
bbio->stripes[dev_nr].physical, dev_nr,
async_submit);
submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
dev_nr, async_submit);
}
btrfs_bio_counter_dec(fs_info);
return 0;
@ -6272,8 +6254,7 @@ struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
return NULL;
}
static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
struct btrfs_fs_devices *fs_devices,
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
u64 devid, u8 *dev_uuid)
{
struct btrfs_device *device;
@ -6344,11 +6325,10 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
}
/* Return -EIO if any error, otherwise return 0. */
static int btrfs_check_chunk_valid(struct btrfs_root *root,
static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk, u64 logical)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 length;
u64 stripe_len;
u16 num_stripes;
@ -6409,11 +6389,10 @@ static int btrfs_check_chunk_valid(struct btrfs_root *root,
return 0;
}
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
@ -6431,7 +6410,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
if (ret)
return ret;
@ -6487,8 +6466,8 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
}
if (!map->stripes[i].dev) {
map->stripes[i].dev =
add_missing_dev(root, fs_info->fs_devices,
devid, uuid);
add_missing_dev(fs_info->fs_devices, devid,
uuid);
if (!map->stripes[i].dev) {
free_extent_map(em);
return -EIO;
@ -6531,10 +6510,9 @@ static void fill_device_from_item(struct extent_buffer *leaf,
read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
u8 *fsid)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_devices *fs_devices;
int ret;
@ -6587,11 +6565,10 @@ out:
return fs_devices;
}
static int read_one_dev(struct btrfs_root *root,
static int read_one_dev(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dev_item *dev_item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
u64 devid;
@ -6606,7 +6583,7 @@ static int read_one_dev(struct btrfs_root *root,
BTRFS_UUID_SIZE);
if (memcmp(fs_uuid, fs_info->fsid, BTRFS_UUID_SIZE)) {
fs_devices = open_seed_devices(root, fs_uuid);
fs_devices = open_seed_devices(fs_info, fs_uuid);
if (IS_ERR(fs_devices))
return PTR_ERR(fs_devices);
}
@ -6616,7 +6593,7 @@ static int read_one_dev(struct btrfs_root *root,
if (!btrfs_test_opt(fs_info, DEGRADED))
return -EIO;
device = add_missing_dev(root, fs_devices, devid, dev_uuid);
device = add_missing_dev(fs_devices, devid, dev_uuid);
if (!device)
return -ENOMEM;
btrfs_warn(fs_info, "devid %llu uuid %pU missing",
@ -6694,7 +6671,7 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
* fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
* overallocate but we can keep it as-is, only the first page is used.
*/
sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
if (IS_ERR(sb))
return PTR_ERR(sb);
set_extent_buffer_uptodate(sb);
@ -6765,7 +6742,7 @@ int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
if (cur_offset + len > array_size)
goto out_short_read;
ret = read_one_chunk(root, &key, sb, chunk);
ret = read_one_chunk(fs_info, &key, sb, chunk);
if (ret)
break;
} else {
@ -6837,14 +6814,14 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
struct btrfs_dev_item *dev_item;
dev_item = btrfs_item_ptr(leaf, slot,
struct btrfs_dev_item);
ret = read_one_dev(root, leaf, dev_item);
ret = read_one_dev(fs_info, leaf, dev_item);
if (ret)
goto error;
total_dev++;
} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
struct btrfs_chunk *chunk;
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
ret = read_one_chunk(root, &found_key, leaf, chunk);
ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
if (ret)
goto error;
}
@ -7095,10 +7072,9 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
int btrfs_get_dev_stats(struct btrfs_root *root,
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_device *dev;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
int i;
@ -7185,10 +7161,9 @@ void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
}
/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *transaction)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_map *em;
struct map_lookup *map;
struct btrfs_device *dev;

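Device lookup and removal in volumes.c now start from the fs_info as well. A sketch, not part of the commit, of the lookup path a caller would use; example_lookup_device() is a made-up wrapper around the btrfs_find_device_by_devspec() prototype shown above.

static int example_lookup_device(struct btrfs_fs_info *fs_info, u64 devid,
				 char *path, struct btrfs_device **device)
{
	/* devid takes precedence; the path is only consulted when devid is 0. */
	*device = NULL;
	return btrfs_find_device_by_devspec(fs_info, devid, path, device);
}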
View file

@ -408,10 +408,10 @@ int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 type);
struct btrfs_fs_info *fs_info, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
@ -421,16 +421,17 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
struct btrfs_device *device, struct btrfs_device *this_dev);
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device **device);
int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
char *devpath,
struct btrfs_device **device);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u64 *devid,
const u8 *uuid);
int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
char *device_path, u64 devid);
void btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
@ -439,7 +440,8 @@ struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
u8 *uuid, u8 *fsid);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path);
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device *srcdev,
struct btrfs_device **device_out);
int btrfs_balance(struct btrfs_balance_control *bctl,
@ -450,7 +452,7 @@ int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
struct btrfs_device *device, u64 num_bytes,
u64 search_start, u64 *start, u64 *max_avail);
@ -458,7 +460,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_root *root,
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
@ -475,7 +477,7 @@ void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path);
int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
u64 logical, u64 len, int mirror_num);
unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct btrfs_mapping_tree *map_tree,
u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
@ -529,7 +531,7 @@ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
}
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *transaction);
static inline void lock_chunks(struct btrfs_fs_info *fs_info)

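The header now exports the fs_info-based prototypes. A sketch, not part of the commit, of calling the device-stats helper through the new signature; example_query_dev_stats() is a made-up name and the caller is assumed to have prepared the stats structure (the ioctl path is its real user).

static int example_query_dev_stats(struct btrfs_fs_info *fs_info,
				   struct btrfs_ioctl_get_dev_stats *stats)
{
	/* stats is assumed to already carry the target devid; that setup is
	 * done by the ioctl caller and is not shown in this commit. */
	return btrfs_get_dev_stats(fs_info, stats);
}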
View file

@ -94,6 +94,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
{
struct btrfs_dir_item *di = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
size_t name_len = strlen(name);
int ret = 0;
@ -149,14 +150,14 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
*/
ret = 0;
btrfs_assert_tree_locked(path->nodes[0]);
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (!di && !(flags & XATTR_REPLACE)) {
ret = -ENOSPC;
goto out;
}
} else if (ret == -EEXIST) {
ret = 0;
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
ASSERT(di); /* logic error */
} else if (ret) {
goto out;
@ -185,7 +186,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
char *ptr;
if (size > old_data_len) {
if (btrfs_leaf_free_space(root, leaf) <
if (btrfs_leaf_free_space(fs_info, leaf) <
(size - old_data_len)) {
ret = -ENOSPC;
goto out;
@ -195,16 +196,17 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
if (old_data_len + name_len + sizeof(*di) == item_size) {
/* No other xattrs packed in the same leaf item. */
if (size > old_data_len)
btrfs_extend_item(root, path,
btrfs_extend_item(fs_info, path,
size - old_data_len);
else if (size < old_data_len)
btrfs_truncate_item(root, path, data_size, 1);
btrfs_truncate_item(fs_info, path,
data_size, 1);
} else {
/* There are other xattrs packed in the same item. */
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto out;
btrfs_extend_item(root, path, data_size);
btrfs_extend_item(fs_info, path, data_size);
}
item = btrfs_item_nr(slot);
@ -265,6 +267,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct btrfs_key key;
struct inode *inode = d_inode(dentry);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret = 0;
@ -333,7 +336,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
u32 this_len = sizeof(*di) + name_len + data_len;
unsigned long name_ptr = (unsigned long)(di + 1);
if (verify_dir_item(root, leaf, di)) {
if (verify_dir_item(fs_info, leaf, di)) {
ret = -EIO;
goto err;
}