Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs update from Chris Mason:
 "These are the usual mixture of bugs, cleanups and performance fixes.
  Miao has some really nice tuning of our crc code as well as our
  transaction commits.

  Josef is peeling off more and more problems related to early enospc,
  and has a number of important bug fixes in here too"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (81 commits)
  Btrfs: wait ordered range before doing direct io
  Btrfs: only do the tree_mod_log_free_eb if this is our last ref
  Btrfs: hold the tree mod lock in __tree_mod_log_rewind
  Btrfs: make backref walking code handle skinny metadata
  Btrfs: fix crash regarding to ulist_add_merge
  Btrfs: fix several potential problems in copy_nocow_pages_for_inode
  Btrfs: cleanup the code of copy_nocow_pages_for_inode()
  Btrfs: fix oops when recovering the file data by scrub function
  Btrfs: make the chunk allocator completely tree lockless
  Btrfs: cleanup orphaned root orphan item
  Btrfs: fix wrong mirror number tuning
  Btrfs: cleanup redundant code in btrfs_submit_direct()
  Btrfs: remove btrfs_sector_sum structure
  Btrfs: check if we can nocow if we don't have data space
  Btrfs: stop using try_to_writeback_inodes_sb_nr to flush delalloc
  Btrfs: use a percpu to keep track of possibly pinned bytes
  Btrfs: check for actual acls rather than just xattrs when caching no acl
  Btrfs: move btrfs_truncate_page to btrfs_cont_expand instead of btrfs_truncate
  Btrfs: optimize reada_for_balance
  Btrfs: optimize read_block_for_search
  ...
Commit e3a0dd98e1
fs/btrfs/backref.c

@@ -255,13 +255,11 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
  * to a logical address
  */
 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
-				  int search_commit_root,
-				  u64 time_seq,
-				  struct __prelim_ref *ref,
-				  struct ulist *parents,
-				  const u64 *extent_item_pos)
+				  struct btrfs_path *path, u64 time_seq,
+				  struct __prelim_ref *ref,
+				  struct ulist *parents,
+				  const u64 *extent_item_pos)
 {
-	struct btrfs_path *path;
 	struct btrfs_root *root;
 	struct btrfs_key root_key;
 	struct extent_buffer *eb;

@@ -269,11 +267,6 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 	int root_level;
 	int level = ref->level;

-	path = btrfs_alloc_path();
-	if (!path)
-		return -ENOMEM;
-	path->search_commit_root = !!search_commit_root;
-
 	root_key.objectid = ref->root_id;
 	root_key.type = BTRFS_ROOT_ITEM_KEY;
 	root_key.offset = (u64)-1;

@@ -314,7 +307,8 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 					time_seq, ref->wanted_disk_byte,
 					extent_item_pos);
 out:
-	btrfs_free_path(path);
+	path->lowest_level = 0;
+	btrfs_release_path(path);
 	return ret;
 }

@@ -322,7 +316,7 @@ out:
  * resolve all indirect backrefs from the list
  */
 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
-				   int search_commit_root, u64 time_seq,
+				   struct btrfs_path *path, u64 time_seq,
 				   struct list_head *head,
 				   const u64 *extent_item_pos)
 {

@@ -349,9 +343,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 			continue;
 		if (ref->count == 0)
 			continue;
-		err = __resolve_indirect_ref(fs_info, search_commit_root,
-					     time_seq, ref, parents,
-					     extent_item_pos);
+		err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
+					     parents, extent_item_pos);
 		if (err == -ENOMEM)
 			goto out;
 		if (err)

@@ -604,6 +597,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 	int slot;
 	struct extent_buffer *leaf;
 	struct btrfs_key key;
+	struct btrfs_key found_key;
 	unsigned long ptr;
 	unsigned long end;
 	struct btrfs_extent_item *ei;

@@ -621,17 +615,21 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,

 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 	flags = btrfs_extent_flags(leaf, ei);
+	btrfs_item_key_to_cpu(leaf, &found_key, slot);

 	ptr = (unsigned long)(ei + 1);
 	end = (unsigned long)ei + item_size;

-	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
+	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 		struct btrfs_tree_block_info *info;

 		info = (struct btrfs_tree_block_info *)ptr;
 		*info_level = btrfs_tree_block_level(leaf, info);
 		ptr += sizeof(struct btrfs_tree_block_info);
 		BUG_ON(ptr > end);
+	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
+		*info_level = found_key.offset;
 	} else {
 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
 	}

@@ -795,7 +793,6 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 	struct btrfs_delayed_ref_head *head;
 	int info_level = 0;
 	int ret;
-	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
 	struct list_head prefs_delayed;
 	struct list_head prefs;
 	struct __prelim_ref *ref;

@@ -804,13 +801,17 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 	INIT_LIST_HEAD(&prefs_delayed);

 	key.objectid = bytenr;
-	key.type = BTRFS_EXTENT_ITEM_KEY;
 	key.offset = (u64)-1;
+	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+		key.type = BTRFS_METADATA_ITEM_KEY;
+	else
+		key.type = BTRFS_EXTENT_ITEM_KEY;

 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-	path->search_commit_root = !!search_commit_root;
+	if (!trans)
+		path->search_commit_root = 1;

 	/*
 	 * grab both a lock on the path and a lock on the delayed ref head.

@@ -825,7 +826,7 @@ again:
 		goto out;
 	BUG_ON(ret == 0);

-	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
+	if (trans) {
 		/*
 		 * look if there are updates for this ref queued and lock the
 		 * head

@@ -869,7 +870,8 @@ again:
 		slot = path->slots[0];
 		btrfs_item_key_to_cpu(leaf, &key, slot);
 		if (key.objectid == bytenr &&
-		    key.type == BTRFS_EXTENT_ITEM_KEY) {
+		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
+		     key.type == BTRFS_METADATA_ITEM_KEY)) {
 			ret = __add_inline_refs(fs_info, path, bytenr,
 						&info_level, &prefs);
 			if (ret)

@@ -890,8 +892,8 @@ again:

 	__merge_refs(&prefs, 1);

-	ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
-				      &prefs, extent_item_pos);
+	ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
+				      extent_item_pos);
 	if (ret)
 		goto out;

@@ -1283,12 +1285,16 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
 {
 	int ret;
 	u64 flags;
+	u64 size = 0;
 	u32 item_size;
 	struct extent_buffer *eb;
 	struct btrfs_extent_item *ei;
 	struct btrfs_key key;

-	key.type = BTRFS_EXTENT_ITEM_KEY;
+	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
+		key.type = BTRFS_METADATA_ITEM_KEY;
+	else
+		key.type = BTRFS_EXTENT_ITEM_KEY;
 	key.objectid = logical;
 	key.offset = (u64)-1;

@@ -1301,9 +1307,15 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
 		return ret;

 	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
-	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
+	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
+		size = fs_info->extent_root->leafsize;
+	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
+		size = found_key->offset;
+
+	if ((found_key->type != BTRFS_EXTENT_ITEM_KEY &&
+	     found_key->type != BTRFS_METADATA_ITEM_KEY) ||
 	    found_key->objectid > logical ||
-	    found_key->objectid + found_key->offset <= logical) {
+	    found_key->objectid + size <= logical) {
 		pr_debug("logical %llu is not within any extent\n",
 			 (unsigned long long)logical);
 		return -ENOENT;

@@ -1459,7 +1471,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 			  iterate_extent_inodes_t *iterate, void *ctx)
 {
 	int ret;
-	struct btrfs_trans_handle *trans;
+	struct btrfs_trans_handle *trans = NULL;
 	struct ulist *refs = NULL;
 	struct ulist *roots = NULL;
 	struct ulist_node *ref_node = NULL;

@@ -1471,9 +1483,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 	pr_debug("resolving all inodes for extent %llu\n",
 			extent_item_objectid);

-	if (search_commit_root) {
-		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
-	} else {
+	if (!search_commit_root) {
 		trans = btrfs_join_transaction(fs_info->extent_root);
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);
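Note on the skinny-metadata handling above: with the SKINNY_METADATA incompat bit set, a tree block is described by a BTRFS_METADATA_ITEM_KEY whose offset holds the block's level rather than its byte size, which is why the hunks read the level from found_key.offset and fall back to leafsize for the extent size. A standalone sketch of that decoding follows; it is illustrative only, the struct and helpers are invented (the key-type constants match the kernel's values to the best of my knowledge):

```c
#include <stdint.h>

#define BTRFS_EXTENT_ITEM_KEY   168
#define BTRFS_METADATA_ITEM_KEY 169

struct disk_key {
	uint64_t objectid;	/* logical byte number of the extent */
	uint8_t  type;
	uint64_t offset;	/* EXTENT_ITEM: size; METADATA_ITEM: level */
};

/* A skinny metadata item carries no btrfs_tree_block_info payload, so
 * the tree level comes straight from key.offset and the size from the
 * fixed metadata block size (leafsize) instead of from the key. */
static uint64_t extent_size(const struct disk_key *key, uint64_t leafsize)
{
	return key->type == BTRFS_METADATA_ITEM_KEY ? leafsize : key->offset;
}

static int extent_level(const struct disk_key *key)
{
	return key->type == BTRFS_METADATA_ITEM_KEY ? (int)key->offset : -1;
}
```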
fs/btrfs/backref.h

@@ -23,8 +23,6 @@
 #include "ulist.h"
 #include "extent_io.h"

-#define BTRFS_BACKREF_SEARCH_COMMIT_ROOT ((struct btrfs_trans_handle *)0)
-
 struct inode_fs_paths {
 	struct btrfs_path *btrfs_path;
 	struct btrfs_root *fs_root;
fs/btrfs/ctree.c (120 lines changed)
@@ -1089,7 +1089,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 		btrfs_set_node_ptr_generation(parent, parent_slot,
 					      trans->transid);
 		btrfs_mark_buffer_dirty(parent);
-		tree_mod_log_free_eb(root->fs_info, buf);
+		if (last_ref)
+			tree_mod_log_free_eb(root->fs_info, buf);
 		btrfs_free_tree_block(trans, root, buf, parent_start,
 				      last_ref);
 	}

@@ -1161,8 +1162,8 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
  * time_seq).
  */
 static void
-__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
-		      struct tree_mod_elem *first_tm)
+__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
+		      u64 time_seq, struct tree_mod_elem *first_tm)
 {
 	u32 n;
 	struct rb_node *next;

@@ -1172,6 +1173,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
 	unsigned long p_size = sizeof(struct btrfs_key_ptr);

 	n = btrfs_header_nritems(eb);
+	tree_mod_log_read_lock(fs_info);
 	while (tm && tm->seq >= time_seq) {
 		/*
 		 * all the operations are recorded with the operator used for

@@ -1226,6 +1228,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
 		if (tm->index != first_tm->index)
 			break;
 	}
+	tree_mod_log_read_unlock(fs_info);
 	btrfs_set_header_nritems(eb, n);
 }

@@ -1274,7 +1277,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,

 	extent_buffer_get(eb_rewin);
 	btrfs_tree_read_lock(eb_rewin);
-	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
+	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
 	WARN_ON(btrfs_header_nritems(eb_rewin) >
 		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

@@ -1350,7 +1353,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 		btrfs_set_header_generation(eb, old_generation);
 	}
 	if (tm)
-		__tree_mod_log_rewind(eb, time_seq, tm);
+		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
 	else
 		WARN_ON(btrfs_header_level(eb) != 0);
 	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

@@ -2178,12 +2181,8 @@ static void reada_for_search(struct btrfs_root *root,
 	}
 }

-/*
- * returns -EAGAIN if it had to drop the path, or zero if everything was in
- * cache
- */
-static noinline int reada_for_balance(struct btrfs_root *root,
-				      struct btrfs_path *path, int level)
+static noinline void reada_for_balance(struct btrfs_root *root,
+				       struct btrfs_path *path, int level)
 {
 	int slot;
 	int nritems;

@@ -2192,12 +2191,11 @@ static noinline int reada_for_balance(struct btrfs_root *root,
 	u64 gen;
 	u64 block1 = 0;
 	u64 block2 = 0;
-	int ret = 0;
 	int blocksize;

 	parent = path->nodes[level + 1];
 	if (!parent)
-		return 0;
+		return;

 	nritems = btrfs_header_nritems(parent);
 	slot = path->slots[level + 1];

@@ -2224,28 +2222,11 @@ static noinline int reada_for_balance(struct btrfs_root *root,
 			block2 = 0;
 		free_extent_buffer(eb);
 	}
-	if (block1 || block2) {
-		ret = -EAGAIN;
-
-		/* release the whole path */
-		btrfs_release_path(path);
-
-		/* read the blocks */
-		if (block1)
-			readahead_tree_block(root, block1, blocksize, 0);
-		if (block2)
-			readahead_tree_block(root, block2, blocksize, 0);
-
-		if (block1) {
-			eb = read_tree_block(root, block1, blocksize, 0);
-			free_extent_buffer(eb);
-		}
-		if (block2) {
-			eb = read_tree_block(root, block2, blocksize, 0);
-			free_extent_buffer(eb);
-		}
-	}
-	return ret;
+	if (block1)
+		readahead_tree_block(root, block1, blocksize, 0);
+	if (block2)
+		readahead_tree_block(root, block2, blocksize, 0);
 }

@@ -2359,35 +2340,28 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
 	if (tmp) {
 		/* first we do an atomic uptodate check */
-		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
-			/*
-			 * we found an up to date block without sleeping, return
-			 * right away
-			 */
-			*eb_ret = tmp;
-			return 0;
-		}
-		/* the pages were up to date, but we failed
-		 * the generation number check.  Do a full
-		 * read for the generation number that is correct.
-		 * We must do this without dropping locks so
-		 * we can trust our generation number
-		 */
-		free_extent_buffer(tmp);
-		btrfs_set_path_blocking(p);
-
-		/* now we're allowed to do a blocking uptodate check */
-		tmp = read_tree_block(root, blocknr, blocksize, gen);
-		if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
-			*eb_ret = tmp;
-			return 0;
-		}
-		free_extent_buffer(tmp);
-		btrfs_release_path(p);
-		return -EIO;
+		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
+			*eb_ret = tmp;
+			return 0;
+		}
+
+		/* the pages were up to date, but we failed
+		 * the generation number check.  Do a full
+		 * read for the generation number that is correct.
+		 * We must do this without dropping locks so
+		 * we can trust our generation number
+		 */
+		btrfs_set_path_blocking(p);
+
+		/* now we're allowed to do a blocking uptodate check */
+		ret = btrfs_read_buffer(tmp, gen);
+		if (!ret) {
+			*eb_ret = tmp;
+			return 0;
+		}
+		free_extent_buffer(tmp);
+		btrfs_release_path(p);
+		return -EIO;
 	}

 	/*

@@ -2448,11 +2422,8 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 			goto again;
 		}

-		sret = reada_for_balance(root, p, level);
-		if (sret)
-			goto again;
-
 		btrfs_set_path_blocking(p);
+		reada_for_balance(root, p, level);
 		sret = split_node(trans, root, p, level);
 		btrfs_clear_path_blocking(p, NULL, 0);

@@ -2472,11 +2443,8 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
 			goto again;
 		}

-		sret = reada_for_balance(root, p, level);
-		if (sret)
-			goto again;
-
 		btrfs_set_path_blocking(p);
+		reada_for_balance(root, p, level);
 		sret = balance_level(trans, root, p, level);
 		btrfs_clear_path_blocking(p, NULL, 0);

@@ -3143,7 +3111,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
  */
 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
-			   struct btrfs_path *path, int level, int log_removal)
+			   struct btrfs_path *path, int level)
 {
 	u64 lower_gen;
 	struct extent_buffer *lower;

@@ -3194,7 +3162,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(c);

 	old = root->node;
-	tree_mod_log_set_root_pointer(root, c, log_removal);
+	tree_mod_log_set_root_pointer(root, c, 0);
 	rcu_assign_pointer(root->node, c);

 	/* the super has an extra ref to root->node */

@@ -3278,14 +3246,14 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 		/*
 		 * trying to split the root, lets make a new one
 		 *
-		 * tree mod log: We pass 0 as log_removal parameter to
+		 * tree mod log: We don't log_removal old root in
 		 * insert_new_root, because that root buffer will be kept as a
 		 * normal node. We are going to log removal of half of the
 		 * elements below with tree_mod_log_eb_copy. We're holding a
 		 * tree lock on the buffer, which is why we cannot race with
 		 * other tree_mod_log users.
 		 */
-		ret = insert_new_root(trans, root, path, level + 1, 0);
+		ret = insert_new_root(trans, root, path, level + 1);
 		if (ret)
 			return ret;
 	} else {

@@ -3986,7 +3954,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 		return -EOVERFLOW;

 	/* first try to make some room by pushing left and right */
-	if (data_size) {
+	if (data_size && path->nodes[1]) {
 		wret = push_leaf_right(trans, root, path, data_size,
 				       data_size, 0, 0);
 		if (wret < 0)

@@ -4005,7 +3973,7 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans,
 	}

 	if (!path->nodes[1]) {
-		ret = insert_new_root(trans, root, path, 1, 1);
+		ret = insert_new_root(trans, root, path, 1);
 		if (ret)
 			return ret;
 	}
fs/btrfs/ctree.h (105 lines changed)
@@ -961,8 +961,8 @@ struct btrfs_dev_replace_item {
 #define BTRFS_BLOCK_GROUP_RAID1		(1ULL << 4)
 #define BTRFS_BLOCK_GROUP_DUP		(1ULL << 5)
 #define BTRFS_BLOCK_GROUP_RAID10	(1ULL << 6)
-#define BTRFS_BLOCK_GROUP_RAID5	(1 << 7)
-#define BTRFS_BLOCK_GROUP_RAID6	(1 << 8)
+#define BTRFS_BLOCK_GROUP_RAID5	(1ULL << 7)
+#define BTRFS_BLOCK_GROUP_RAID6	(1ULL << 8)
 #define BTRFS_BLOCK_GROUP_RESERVED	BTRFS_AVAIL_ALLOC_BIT_SINGLE

 enum btrfs_raid_types {
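The hunk above makes the RAID5/6 flags 64-bit like their neighbours. The value 0x80 is the same either way; the difference is the type the compiler does further arithmetic in. A standalone demo (not btrfs code) of the pitfall:

```c
#include <stdio.h>
#include <stdint.h>

#define FLAG_INT (1 << 7)	/* int-typed: arithmetic happens in 32 bits */
#define FLAG_ULL (1ULL << 7)	/* unsigned long long: 64-bit arithmetic */

int main(void)
{
	/* Any further shifting is done in the constant's own type first:
	 *
	 *   uint64_t bad = FLAG_INT << 40;  -- a total shift of 47 bits on
	 *                                      a 32-bit int: undefined
	 *                                      behavior, result is garbage
	 */
	uint64_t good = FLAG_ULL << 40;	/* bit 47 set, as intended */

	printf("%#llx\n", (unsigned long long)good);
	return 0;
}
```

These flags live in u64 fields next to bits like BTRFS_AVAIL_ALLOC_BIT_SINGLE (bit 48), so keeping every definition ULL avoids mixing 32- and 64-bit operands in one mask expression.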
@@ -1101,6 +1101,18 @@ struct btrfs_space_info {
 	u64 disk_total;		/* total bytes on disk, takes mirrors into
 				   account */

+	/*
+	 * bytes_pinned is kept in line with what is actually pinned, as in
+	 * we've called update_block_group and dropped the bytes_used counter
+	 * and increased the bytes_pinned counter.  However this means that
+	 * bytes_pinned does not reflect the bytes that will be pinned once the
+	 * delayed refs are flushed, so this counter is inc'ed everytime we
+	 * call btrfs_free_extent so it is a realtime count of what will be
+	 * freed once the transaction is committed.  It will be zero'ed
+	 * everytime the transaction commits.
+	 */
+	struct percpu_counter total_bytes_pinned;
+
 	/*
 	 * we bump reservation progress every time we decrement
 	 * bytes_reserved.  This way people waiting for reservations
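total_bytes_pinned is a percpu_counter rather than a plain u64 so that the hot path (every btrfs_free_extent call) only touches a per-CPU delta. A sketch of the pattern follows; it is not lifted from the patch, the function names are invented, and note that percpu_counter_init() gained a gfp_t argument in kernels newer than the one patched here:

```c
#include <linux/percpu_counter.h>

static struct percpu_counter pinned;

static int pinned_init(void)
{
	return percpu_counter_init(&pinned, 0);
}

/* hot path: a per-CPU add, no shared cacheline to bounce */
static void note_bytes_pinned(s64 num_bytes)
{
	percpu_counter_add(&pinned, num_bytes);
}

/* slow path: fold all per-CPU deltas into a single total */
static s64 bytes_pinned_total(void)
{
	return percpu_counter_sum(&pinned);
}

static void pinned_exit(void)
{
	percpu_counter_destroy(&pinned);
}
```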
@@ -1437,25 +1449,22 @@ struct btrfs_fs_info {
 	atomic_t open_ioctl_trans;

 	/*
-	 * this is used by the balancing code to wait for all the pending
-	 * ordered extents
+	 * this is used to protect the following list -- ordered_roots.
 	 */
-	spinlock_t ordered_extent_lock;
+	spinlock_t ordered_root_lock;

 	/*
-	 * all of the data=ordered extents pending writeback
+	 * all fs/file tree roots in which there are data=ordered extents
+	 * pending writeback are added into this list.
+	 *
 	 * these can span multiple transactions and basically include
 	 * every dirty data page that isn't from nodatacow
 	 */
-	struct list_head ordered_extents;
+	struct list_head ordered_roots;

-	spinlock_t delalloc_lock;
-	/*
-	 * all of the inodes that have delalloc bytes.  It is possible for
-	 * this list to be empty even when there is still dirty data=ordered
-	 * extents waiting to finish IO.
-	 */
-	struct list_head delalloc_inodes;
+	spinlock_t delalloc_root_lock;
+	/* all fs/file tree roots that have delalloc inodes. */
+	struct list_head delalloc_roots;

 	/*
 	 * there is a pool of worker threads for checksumming during writes

@@ -1498,8 +1507,6 @@ struct btrfs_fs_info {
 	int do_barriers;
 	int closing;
 	int log_root_recovering;
-	int enospc_unlink;
-	int trans_no_join;

 	u64 total_pinned;

@@ -1594,6 +1601,12 @@ struct btrfs_fs_info {
 	struct rb_root qgroup_tree;
 	spinlock_t qgroup_lock;

+	/*
+	 * used to avoid frequently calling ulist_alloc()/ulist_free()
+	 * when doing qgroup accounting, it must be protected by qgroup_lock.
+	 */
+	struct ulist *qgroup_ulist;
+
 	/* protect user change for quota operations */
 	struct mutex qgroup_ioctl_lock;

@@ -1607,6 +1620,8 @@ struct btrfs_fs_info {
 	struct mutex qgroup_rescan_lock; /* protects the progress item */
 	struct btrfs_key qgroup_rescan_progress;
 	struct btrfs_workers qgroup_rescan_workers;
+	struct completion qgroup_rescan_completion;
+	struct btrfs_work qgroup_rescan_work;

 	/* filesystem state */
 	unsigned long fs_state;

@@ -1739,6 +1754,31 @@ struct btrfs_root {
 	int force_cow;

 	spinlock_t root_item_lock;
+	atomic_t refs;
+
+	spinlock_t delalloc_lock;
+	/*
+	 * all of the inodes that have delalloc bytes.  It is possible for
+	 * this list to be empty even when there is still dirty data=ordered
+	 * extents waiting to finish IO.
+	 */
+	struct list_head delalloc_inodes;
+	struct list_head delalloc_root;
+	u64 nr_delalloc_inodes;
+	/*
+	 * this is used by the balancing code to wait for all the pending
+	 * ordered extents
+	 */
+	spinlock_t ordered_extent_lock;
+
+	/*
+	 * all of the data=ordered extents pending writeback
+	 * these can span multiple transactions and basically include
+	 * every dirty data page that isn't from nodatacow
+	 */
+	struct list_head ordered_extents;
+	struct list_head ordered_root;
+	u64 nr_ordered_extents;
 };

 struct btrfs_ioctl_defrag_range_args {

@@ -3028,6 +3068,8 @@ static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root,
 		num_items;
 }

+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
+				       struct btrfs_root *root);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, unsigned long count);

@@ -3039,6 +3081,8 @@ int btrfs_pin_extent(struct btrfs_root *root,
 		     u64 bytenr, u64 num, int reserved);
 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
 				    u64 bytenr, u64 num_bytes);
+int btrfs_exclude_logged_extents(struct btrfs_root *root,
+				 struct extent_buffer *eb);
 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root,
 			  u64 objectid, u64 offset, u64 bytenr);

@@ -3155,6 +3199,9 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
 			    struct btrfs_block_rsv *dst_rsv,
 			    u64 num_bytes);
+int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
+			     struct btrfs_block_rsv *dest, u64 num_bytes,
+			     int min_factor);
 void btrfs_block_rsv_release(struct btrfs_root *root,
 			     struct btrfs_block_rsv *block_rsv,
 			     u64 num_bytes);

@@ -3311,6 +3358,18 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
 	smp_mb();
 	return fs_info->closing;
 }
+
+/*
+ * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
+ * anything except sleeping. This function is used to check the status of
+ * the fs.
+ */
+static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
+{
+	return (root->fs_info->sb->s_flags & MS_RDONLY ||
+		btrfs_fs_closing(root->fs_info));
+}
+
 static inline void free_fs_info(struct btrfs_fs_info *fs_info)
 {
 	kfree(fs_info->balance_ctl);

@@ -3357,9 +3416,9 @@ int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
 			     struct btrfs_root_item *item);
 void btrfs_read_root_item(struct extent_buffer *eb, int slot,
 			  struct btrfs_root_item *item);
-int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
-			 btrfs_root_item *item, struct btrfs_key *key);
-int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
+int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
+		    struct btrfs_path *path, struct btrfs_root_item *root_item,
+		    struct btrfs_key *root_key);
 int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
 void btrfs_set_root_node(struct btrfs_root_item *item,
 			 struct extent_buffer *node);

@@ -3493,6 +3552,10 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work);
 struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
 					   size_t pg_offset, u64 start, u64 len,
 					   int create);
+noinline int can_nocow_extent(struct btrfs_trans_handle *trans,
+			      struct inode *inode, u64 offset, u64 *len,
+			      u64 *orig_start, u64 *orig_block_len,
+			      u64 *ram_bytes);

 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
 #if defined(ClearPageFsMisc) && !defined(ClearPageChecked)

@@ -3530,6 +3593,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 			       u32 min_type);

 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
+int btrfs_start_all_delalloc_inodes(struct btrfs_fs_info *fs_info,
+				    int delay_iput);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 			      struct extent_state **cached_state);
 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,

@@ -3814,6 +3879,8 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
 int btrfs_quota_disable(struct btrfs_trans_handle *trans,
 			struct btrfs_fs_info *fs_info);
 int btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info);
+void btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info);
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info);
 int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
 			      struct btrfs_fs_info *fs_info, u64 src, u64 dst);
 int btrfs_del_qgroup_relation(struct btrfs_trans_handle *trans,
fs/btrfs/delayed-inode.c

@@ -535,20 +535,6 @@ static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 	return next;
 }

-static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
-						   u64 root_id)
-{
-	struct btrfs_key root_key;
-
-	if (root->objectid == root_id)
-		return root;
-
-	root_key.objectid = root_id;
-	root_key.type = BTRFS_ROOT_ITEM_KEY;
-	root_key.offset = (u64)-1;
-	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
-}
-
 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 					       struct btrfs_root *root,
 					       struct btrfs_delayed_item *item)
fs/btrfs/dev-replace.c

@@ -400,7 +400,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
 	args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR;
 	btrfs_dev_replace_unlock(dev_replace);

-	btrfs_wait_ordered_extents(root, 0);
+	btrfs_wait_all_ordered_extents(root->fs_info, 0);

 	/* force writing the updated state information to disk */
 	trans = btrfs_start_transaction(root, 0);

@@ -470,12 +470,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	 * flush all outstanding I/O and inode extent mappings before the
 	 * copy operation is declared as being finished
 	 */
-	ret = btrfs_start_delalloc_inodes(root, 0);
+	ret = btrfs_start_all_delalloc_inodes(root->fs_info, 0);
 	if (ret) {
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 		return ret;
 	}
-	btrfs_wait_ordered_extents(root, 0);
+	btrfs_wait_all_ordered_extents(root->fs_info, 0);

 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
fs/btrfs/disk-io.c

@@ -1192,6 +1192,8 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	root->objectid = objectid;
 	root->last_trans = 0;
 	root->highest_objectid = 0;
+	root->nr_delalloc_inodes = 0;
+	root->nr_ordered_extents = 0;
 	root->name = NULL;
 	root->inode_tree = RB_ROOT;
 	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);

@@ -1200,10 +1202,16 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,

 	INIT_LIST_HEAD(&root->dirty_list);
 	INIT_LIST_HEAD(&root->root_list);
+	INIT_LIST_HEAD(&root->delalloc_inodes);
+	INIT_LIST_HEAD(&root->delalloc_root);
+	INIT_LIST_HEAD(&root->ordered_extents);
+	INIT_LIST_HEAD(&root->ordered_root);
 	INIT_LIST_HEAD(&root->logged_list[0]);
 	INIT_LIST_HEAD(&root->logged_list[1]);
 	spin_lock_init(&root->orphan_lock);
 	spin_lock_init(&root->inode_lock);
+	spin_lock_init(&root->delalloc_lock);
+	spin_lock_init(&root->ordered_extent_lock);
 	spin_lock_init(&root->accounting_lock);
 	spin_lock_init(&root->log_extents_lock[0]);
 	spin_lock_init(&root->log_extents_lock[1]);

@@ -1217,6 +1225,7 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	atomic_set(&root->log_writers, 0);
 	atomic_set(&root->log_batch, 0);
 	atomic_set(&root->orphan_inodes, 0);
+	atomic_set(&root->refs, 1);
 	root->log_transid = 0;
 	root->last_log_commit = 0;
 	extent_io_tree_init(&root->dirty_log_pages,

@@ -1235,39 +1244,6 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
 	spin_lock_init(&root->root_item_lock);
 }

-static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
-					    struct btrfs_fs_info *fs_info,
-					    u64 objectid,
-					    struct btrfs_root *root)
-{
-	int ret;
-	u32 blocksize;
-	u64 generation;
-
-	__setup_root(tree_root->nodesize, tree_root->leafsize,
-		     tree_root->sectorsize, tree_root->stripesize,
-		     root, fs_info, objectid);
-	ret = btrfs_find_last_root(tree_root, objectid,
-				   &root->root_item, &root->root_key);
-	if (ret > 0)
-		return -ENOENT;
-	else if (ret < 0)
-		return ret;
-
-	generation = btrfs_root_generation(&root->root_item);
-	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
-	root->commit_root = NULL;
-	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
-				     blocksize, generation);
-	if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
-		free_extent_buffer(root->node);
-		root->node = NULL;
-		return -EIO;
-	}
-	root->commit_root = btrfs_root_node(root);
-	return 0;
-}
-
 static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);

@@ -1452,70 +1428,73 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 	return 0;
 }

-struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
-					       struct btrfs_key *location)
+struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
+					struct btrfs_key *key)
 {
 	struct btrfs_root *root;
 	struct btrfs_fs_info *fs_info = tree_root->fs_info;
+	struct btrfs_path *path;
-	struct extent_buffer *l;
 	u64 generation;
 	u32 blocksize;
-	int ret = 0;
-	int slot;
+	int ret;

+	path = btrfs_alloc_path();
+	if (!path)
+		return ERR_PTR(-ENOMEM);
+
 	root = btrfs_alloc_root(fs_info);
-	if (!root)
-		return ERR_PTR(-ENOMEM);
-	if (location->offset == (u64)-1) {
-		ret = find_and_setup_root(tree_root, fs_info,
-					  location->objectid, root);
-		if (ret) {
-			kfree(root);
-			return ERR_PTR(ret);
-		}
-		goto out;
+	if (!root) {
+		ret = -ENOMEM;
+		goto alloc_fail;
 	}

 	__setup_root(tree_root->nodesize, tree_root->leafsize,
 		     tree_root->sectorsize, tree_root->stripesize,
-		     root, fs_info, location->objectid);
+		     root, fs_info, key->objectid);

-	path = btrfs_alloc_path();
-	if (!path) {
-		kfree(root);
-		return ERR_PTR(-ENOMEM);
-	}
-	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
-	if (ret == 0) {
-		l = path->nodes[0];
-		slot = path->slots[0];
-		btrfs_read_root_item(l, slot, &root->root_item);
-		memcpy(&root->root_key, location, sizeof(*location));
-	}
-	btrfs_free_path(path);
+	ret = btrfs_find_root(tree_root, key, path,
+			      &root->root_item, &root->root_key);
 	if (ret) {
-		kfree(root);
 		if (ret > 0)
 			ret = -ENOENT;
-		return ERR_PTR(ret);
+		goto find_fail;
 	}

 	generation = btrfs_root_generation(&root->root_item);
 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
 				     blocksize, generation);
-	if (!root->node || !extent_buffer_uptodate(root->node)) {
-		ret = (!root->node) ? -ENOMEM : -EIO;
-
-		free_extent_buffer(root->node);
-		kfree(root);
-		return ERR_PTR(ret);
+	if (!root->node) {
+		ret = -ENOMEM;
+		goto find_fail;
+	} else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
+		ret = -EIO;
+		goto read_fail;
 	}
-
 	root->commit_root = btrfs_root_node(root);
 out:
-	if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
+	btrfs_free_path(path);
+	return root;
+
+read_fail:
+	free_extent_buffer(root->node);
+find_fail:
+	kfree(root);
+alloc_fail:
+	root = ERR_PTR(ret);
+	goto out;
+}
+
+struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
+				      struct btrfs_key *location)
+{
+	struct btrfs_root *root;
+
+	root = btrfs_read_tree_root(tree_root, location);
+	if (IS_ERR(root))
+		return root;
+
+	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
 		root->ref_cows = 1;
 		btrfs_check_and_init_root_item(&root->root_item);
 	}
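The rewritten btrfs_read_tree_root() above replaces repeated free-and-return failure paths with a ladder of goto labels (read_fail, find_fail, alloc_fail). The same unwind idiom in miniature, outside the kernel; names here are invented for illustration:

```c
#include <stdlib.h>
#include <string.h>

struct widget {
	char *name;
	char *buf;
};

/* Each label releases exactly what was live when its goto fired, so
 * every failure path is a straight line and nothing leaks or is
 * double-freed. The success path never touches the labels. */
static struct widget *widget_create(const char *name, size_t bufsize)
{
	struct widget *w;

	w = malloc(sizeof(*w));
	if (!w)
		goto alloc_fail;

	w->name = strdup(name);
	if (!w->name)
		goto name_fail;

	w->buf = malloc(bufsize);
	if (!w->buf)
		goto buf_fail;

	return w;

buf_fail:
	free(w->name);
name_fail:
	free(w);
alloc_fail:
	return NULL;
}
```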
@@ -1523,6 +1502,66 @@ out:
 	return root;
 }

+int btrfs_init_fs_root(struct btrfs_root *root)
+{
+	int ret;
+
+	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
+	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
+					GFP_NOFS);
+	if (!root->free_ino_pinned || !root->free_ino_ctl) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	btrfs_init_free_ino_ctl(root);
+	mutex_init(&root->fs_commit_mutex);
+	spin_lock_init(&root->cache_lock);
+	init_waitqueue_head(&root->cache_wait);
+
+	ret = get_anon_bdev(&root->anon_dev);
+	if (ret)
+		goto fail;
+	return 0;
+fail:
+	kfree(root->free_ino_ctl);
+	kfree(root->free_ino_pinned);
+	return ret;
+}
+
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+					u64 root_id)
+{
+	struct btrfs_root *root;
+
+	spin_lock(&fs_info->fs_roots_radix_lock);
+	root = radix_tree_lookup(&fs_info->fs_roots_radix,
+				 (unsigned long)root_id);
+	spin_unlock(&fs_info->fs_roots_radix_lock);
+	return root;
+}
+
+int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
+			 struct btrfs_root *root)
+{
+	int ret;
+
+	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+	if (ret)
+		return ret;
+
+	spin_lock(&fs_info->fs_roots_radix_lock);
+	ret = radix_tree_insert(&fs_info->fs_roots_radix,
+				(unsigned long)root->root_key.objectid,
+				root);
+	if (ret == 0)
+		root->in_radix = 1;
+	spin_unlock(&fs_info->fs_roots_radix_lock);
+	radix_tree_preload_end();
+
+	return ret;
+}
+
 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 					      struct btrfs_key *location)
 {
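btrfs_insert_fs_root() above keeps the preload-then-insert shape: radix_tree_insert() may need to allocate interior tree nodes, which must not happen while a spinlock is held, so the nodes are set aside up front with radix_tree_preload(). A generic sketch of the same pattern (tree, lock, and function names invented for illustration):

```c
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

static RADIX_TREE(obj_tree, GFP_NOFS & ~__GFP_HIGHMEM);
static DEFINE_SPINLOCK(obj_lock);

static int obj_cache_insert(unsigned long id, void *obj)
{
	int ret;

	/* allocate worst-case tree nodes now; disables preemption
	 * on success so the preload stays tied to this CPU */
	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret)
		return ret;

	spin_lock(&obj_lock);
	ret = radix_tree_insert(&obj_tree, id, obj);	/* uses the preload */
	spin_unlock(&obj_lock);

	radix_tree_preload_end();	/* re-enables preemption */
	return ret;
}
```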
@@ -1543,58 +1582,30 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 		return fs_info->quota_root ? fs_info->quota_root :
 					     ERR_PTR(-ENOENT);
 again:
-	spin_lock(&fs_info->fs_roots_radix_lock);
-	root = radix_tree_lookup(&fs_info->fs_roots_radix,
-				 (unsigned long)location->objectid);
-	spin_unlock(&fs_info->fs_roots_radix_lock);
+	root = btrfs_lookup_fs_root(fs_info, location->objectid);
 	if (root)
 		return root;

-	root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
+	root = btrfs_read_fs_root(fs_info->tree_root, location);
 	if (IS_ERR(root))
 		return root;

-	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
-	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
-					GFP_NOFS);
-	if (!root->free_ino_pinned || !root->free_ino_ctl) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	btrfs_init_free_ino_ctl(root);
-	mutex_init(&root->fs_commit_mutex);
-	spin_lock_init(&root->cache_lock);
-	init_waitqueue_head(&root->cache_wait);
-
-	ret = get_anon_bdev(&root->anon_dev);
-	if (ret)
-		goto fail;
-
 	if (btrfs_root_refs(&root->root_item) == 0) {
 		ret = -ENOENT;
 		goto fail;
 	}

+	ret = btrfs_init_fs_root(root);
+	if (ret)
+		goto fail;
+
 	ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
 	if (ret < 0)
 		goto fail;
 	if (ret == 0)
 		root->orphan_item_inserted = 1;

-	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
-	if (ret)
-		goto fail;
-
-	spin_lock(&fs_info->fs_roots_radix_lock);
-	ret = radix_tree_insert(&fs_info->fs_roots_radix,
-				(unsigned long)root->root_key.objectid,
-				root);
-	if (ret == 0)
-		root->in_radix = 1;
-
-	spin_unlock(&fs_info->fs_roots_radix_lock);
-	radix_tree_preload_end();
+	ret = btrfs_insert_fs_root(fs_info, root);
 	if (ret) {
 		if (ret == -EEXIST) {
 			free_fs_root(root);

@@ -1602,10 +1613,6 @@ again:
 		}
 		goto fail;
 	}
-
-	ret = btrfs_find_dead_roots(fs_info->tree_root,
-				    root->root_key.objectid);
-	WARN_ON(ret);
 	return root;
 fail:
 	free_fs_root(root);

@@ -1677,21 +1684,37 @@ static void end_workqueue_fn(struct btrfs_work *work)
 static int cleaner_kthread(void *arg)
 {
 	struct btrfs_root *root = arg;
+	int again;

 	do {
-		int again = 0;
-
-		if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
-		    down_read_trylock(&root->fs_info->sb->s_umount)) {
-			if (mutex_trylock(&root->fs_info->cleaner_mutex)) {
-				btrfs_run_delayed_iputs(root);
-				again = btrfs_clean_one_deleted_snapshot(root);
-				mutex_unlock(&root->fs_info->cleaner_mutex);
-			}
-			btrfs_run_defrag_inodes(root->fs_info);
-			up_read(&root->fs_info->sb->s_umount);
-		}
+		again = 0;
+
+		/* Make the cleaner go to sleep early. */
+		if (btrfs_need_cleaner_sleep(root))
+			goto sleep;
+
+		if (!mutex_trylock(&root->fs_info->cleaner_mutex))
+			goto sleep;
+
+		/*
+		 * Avoid the problem that we change the status of the fs
+		 * during the above check and trylock.
+		 */
+		if (btrfs_need_cleaner_sleep(root)) {
+			mutex_unlock(&root->fs_info->cleaner_mutex);
+			goto sleep;
+		}
+
+		btrfs_run_delayed_iputs(root);
+		again = btrfs_clean_one_deleted_snapshot(root);
+		mutex_unlock(&root->fs_info->cleaner_mutex);
+
+		/*
+		 * The defragger has dealt with the R/O remount and umount,
+		 * needn't do anything special here.
+		 */
+		btrfs_run_defrag_inodes(root->fs_info);
+sleep:
 		if (!try_to_freeze() && !again) {
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!kthread_should_stop())
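The reshaped cleaner loop above is a textbook check / trylock / re-check: a cheap lockless test, a non-blocking lock attempt, and a second test under the lock to close the window in which the fs state can change. The same shape as a generic sketch (struct and helpers invented for illustration):

```c
#include <linux/mutex.h>

struct cleaner {
	struct mutex lock;
	bool need_sleep;	/* e.g. fs went read-only or is unmounting */
};

static int cleaner_try_work(struct cleaner *c)
{
	if (c->need_sleep)		/* cheap lockless early-out */
		return 0;

	if (!mutex_trylock(&c->lock))	/* never block this loop */
		return 0;

	if (c->need_sleep) {		/* re-check now that we hold the lock;
					 * the state may have changed between
					 * the first test and the trylock */
		mutex_unlock(&c->lock);
		return 0;
	}

	/* ... do one unit of cleaning under the lock ... */
	mutex_unlock(&c->lock);
	return 1;			/* made progress; caller may loop */
}
```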
@@ -1725,7 +1748,7 @@ static int transaction_kthread(void *arg)
 		}

 		now = get_seconds();
-		if (!cur->blocked &&
+		if (cur->state < TRANS_STATE_BLOCKED &&
 		    (now < cur->start_time || now - cur->start_time < 30)) {
 			spin_unlock(&root->fs_info->trans_lock);
 			delay = HZ * 5;

@@ -2035,11 +2058,11 @@ static void del_fs_roots(struct btrfs_fs_info *fs_info)
 			list_del(&gang[0]->root_list);

 			if (gang[0]->in_radix) {
-				btrfs_free_fs_root(fs_info, gang[0]);
+				btrfs_drop_and_free_fs_root(fs_info, gang[0]);
 			} else {
 				free_extent_buffer(gang[0]->node);
 				free_extent_buffer(gang[0]->commit_root);
-				kfree(gang[0]);
+				btrfs_put_fs_root(gang[0]);
 			}
 		}

@@ -2050,7 +2073,7 @@ static void del_fs_roots(struct btrfs_fs_info *fs_info)
 		if (!ret)
 			break;
 		for (i = 0; i < ret; i++)
-			btrfs_free_fs_root(fs_info, gang[i]);
+			btrfs_drop_and_free_fs_root(fs_info, gang[i]);
 	}
 }

@@ -2082,14 +2105,8 @@ int open_ctree(struct super_block *sb,
 	int backup_index = 0;

 	tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
-	extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
-	csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
 	chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
-	dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
-	quota_root = fs_info->quota_root = btrfs_alloc_root(fs_info);
-
-	if (!tree_root || !extent_root || !csum_root ||
-	    !chunk_root || !dev_root || !quota_root) {
+	if (!tree_root || !chunk_root) {
 		err = -ENOMEM;
 		goto fail;
 	}

@@ -2132,9 +2149,9 @@ int open_ctree(struct super_block *sb,
 	INIT_LIST_HEAD(&fs_info->trans_list);
 	INIT_LIST_HEAD(&fs_info->dead_roots);
 	INIT_LIST_HEAD(&fs_info->delayed_iputs);
-	INIT_LIST_HEAD(&fs_info->delalloc_inodes);
+	INIT_LIST_HEAD(&fs_info->delalloc_roots);
 	INIT_LIST_HEAD(&fs_info->caching_block_groups);
-	spin_lock_init(&fs_info->delalloc_lock);
+	spin_lock_init(&fs_info->delalloc_root_lock);
 	spin_lock_init(&fs_info->trans_lock);
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
 	spin_lock_init(&fs_info->delayed_iput_lock);

@@ -2170,7 +2187,6 @@ int open_ctree(struct super_block *sb,
 	fs_info->max_inline = 8192 * 1024;
 	fs_info->metadata_ratio = 0;
 	fs_info->defrag_inodes = RB_ROOT;
-	fs_info->trans_no_join = 0;
 	fs_info->free_chunk_space = 0;
 	fs_info->tree_mod_log = RB_ROOT;

@@ -2181,8 +2197,8 @@ int open_ctree(struct super_block *sb,
 	fs_info->thread_pool_size = min_t(unsigned long,
 					  num_online_cpus() + 2, 8);

-	INIT_LIST_HEAD(&fs_info->ordered_extents);
-	spin_lock_init(&fs_info->ordered_extent_lock);
+	INIT_LIST_HEAD(&fs_info->ordered_roots);
+	spin_lock_init(&fs_info->ordered_root_lock);
 	fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
 					GFP_NOFS);
 	if (!fs_info->delayed_root) {

@@ -2275,6 +2291,7 @@ int open_ctree(struct super_block *sb,
 	fs_info->qgroup_seq = 1;
 	fs_info->quota_enabled = 0;
 	fs_info->pending_quota_state = 0;
+	fs_info->qgroup_ulist = NULL;
 	mutex_init(&fs_info->qgroup_rescan_lock);

 	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);

@@ -2639,33 +2656,44 @@ retry_root_backup:
 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
 	tree_root->commit_root = btrfs_root_node(tree_root);

-	ret = find_and_setup_root(tree_root, fs_info,
-				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
-	if (ret)
+	location.objectid = BTRFS_EXTENT_TREE_OBJECTID;
+	location.type = BTRFS_ROOT_ITEM_KEY;
+	location.offset = 0;
+
+	extent_root = btrfs_read_tree_root(tree_root, &location);
+	if (IS_ERR(extent_root)) {
+		ret = PTR_ERR(extent_root);
 		goto recovery_tree_root;
+	}
 	extent_root->track_dirty = 1;
+	fs_info->extent_root = extent_root;

-	ret = find_and_setup_root(tree_root, fs_info,
-				  BTRFS_DEV_TREE_OBJECTID, dev_root);
-	if (ret)
+	location.objectid = BTRFS_DEV_TREE_OBJECTID;
+	dev_root = btrfs_read_tree_root(tree_root, &location);
+	if (IS_ERR(dev_root)) {
+		ret = PTR_ERR(dev_root);
 		goto recovery_tree_root;
+	}
 	dev_root->track_dirty = 1;
+	fs_info->dev_root = dev_root;
 	btrfs_init_devices_late(fs_info);

-	ret = find_and_setup_root(tree_root, fs_info,
-				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
-	if (ret)
+	location.objectid = BTRFS_CSUM_TREE_OBJECTID;
+	csum_root = btrfs_read_tree_root(tree_root, &location);
+	if (IS_ERR(csum_root)) {
+		ret = PTR_ERR(csum_root);
 		goto recovery_tree_root;
+	}
 	csum_root->track_dirty = 1;
+	fs_info->csum_root = csum_root;

-	ret = find_and_setup_root(tree_root, fs_info,
-				  BTRFS_QUOTA_TREE_OBJECTID, quota_root);
-	if (ret) {
-		kfree(quota_root);
-		quota_root = fs_info->quota_root = NULL;
-	} else {
+	location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
+	quota_root = btrfs_read_tree_root(tree_root, &location);
+	if (!IS_ERR(quota_root)) {
+		quota_root->track_dirty = 1;
 		fs_info->quota_enabled = 1;
 		fs_info->pending_quota_state = 1;
+		fs_info->quota_root = quota_root;
 	}

 	fs_info->generation = generation;

@@ -2818,11 +2846,9 @@ retry_root_backup:

 	location.objectid = BTRFS_FS_TREE_OBJECTID;
 	location.type = BTRFS_ROOT_ITEM_KEY;
-	location.offset = (u64)-1;
+	location.offset = 0;

 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
-	if (!fs_info->fs_root)
-		goto fail_qgroup;
 	if (IS_ERR(fs_info->fs_root)) {
 		err = PTR_ERR(fs_info->fs_root);
 		goto fail_qgroup;

@@ -2854,6 +2880,8 @@ retry_root_backup:
 			return ret;
 		}

+		btrfs_qgroup_rescan_resume(fs_info);
+
 		return 0;
 	}

 fail_qgroup:

@@ -3259,7 +3287,7 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
 					    BTRFS_BLOCK_GROUP_RAID10)) {
 					num_tolerated_disk_barrier_failures = 1;
 				} else if (flags &
-					   BTRFS_BLOCK_GROUP_RAID5) {
+					   BTRFS_BLOCK_GROUP_RAID6) {
 					num_tolerated_disk_barrier_failures = 2;
 				}
 			}

@@ -3367,7 +3395,9 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
 	return ret;
 }

-void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+/* Drop a fs root from the radix tree and free it. */
+void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
+				 struct btrfs_root *root)
 {
 	spin_lock(&fs_info->fs_roots_radix_lock);
 	radix_tree_delete(&fs_info->fs_roots_radix,

@@ -3398,7 +3428,12 @@ static void free_fs_root(struct btrfs_root *root)
 	kfree(root->free_ino_ctl);
 	kfree(root->free_ino_pinned);
 	kfree(root->name);
-	kfree(root);
+	btrfs_put_fs_root(root);
+}
+
+void btrfs_free_fs_root(struct btrfs_root *root)
+{
+	free_fs_root(root);
 }

 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)

@@ -3654,7 +3689,7 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
 	INIT_LIST_HEAD(&splice);

 	mutex_lock(&root->fs_info->ordered_operations_mutex);
-	spin_lock(&root->fs_info->ordered_extent_lock);
+	spin_lock(&root->fs_info->ordered_root_lock);

 	list_splice_init(&t->ordered_operations, &splice);
 	while (!list_empty(&splice)) {

@@ -3662,14 +3697,14 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
 				       ordered_operations);

 		list_del_init(&btrfs_inode->ordered_operations);
-		spin_unlock(&root->fs_info->ordered_extent_lock);
+		spin_unlock(&root->fs_info->ordered_root_lock);

 		btrfs_invalidate_inodes(btrfs_inode->root);

-		spin_lock(&root->fs_info->ordered_extent_lock);
+		spin_lock(&root->fs_info->ordered_root_lock);
 	}

-	spin_unlock(&root->fs_info->ordered_extent_lock);
+	spin_unlock(&root->fs_info->ordered_root_lock);
 	mutex_unlock(&root->fs_info->ordered_operations_mutex);
 }

@@ -3677,15 +3712,36 @@ static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
 {
 	struct btrfs_ordered_extent *ordered;

-	spin_lock(&root->fs_info->ordered_extent_lock);
+	spin_lock(&root->ordered_extent_lock);
 	/*
 	 * This will just short circuit the ordered completion stuff which will
 	 * make sure the ordered extent gets properly cleaned up.
 	 */
-	list_for_each_entry(ordered, &root->fs_info->ordered_extents,
+	list_for_each_entry(ordered, &root->ordered_extents,
 			    root_extent_list)
 		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
-	spin_unlock(&root->fs_info->ordered_extent_lock);
+	spin_unlock(&root->ordered_extent_lock);
+}
+
+static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *root;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	spin_lock(&fs_info->ordered_root_lock);
+	list_splice_init(&fs_info->ordered_roots, &splice);
+	while (!list_empty(&splice)) {
+		root = list_first_entry(&splice, struct btrfs_root,
+					ordered_root);
+		list_del_init(&root->ordered_root);
+
+		btrfs_destroy_ordered_extents(root);
+
+		cond_resched_lock(&fs_info->ordered_root_lock);
+	}
+	spin_unlock(&fs_info->ordered_root_lock);
 }

 int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,

@@ -3707,6 +3763,7 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,

 	while ((node = rb_first(&delayed_refs->root)) != NULL) {
 		struct btrfs_delayed_ref_head *head = NULL;
+		bool pin_bytes = false;

 		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
 		atomic_set(&ref->refs, 1);

@@ -3727,8 +3784,7 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 			}

 			if (head->must_insert_reserved)
-				btrfs_pin_extent(root, ref->bytenr,
-						 ref->num_bytes, 1);
+				pin_bytes = true;
 			btrfs_free_delayed_extent_op(head->extent_op);
 			delayed_refs->num_heads--;
 			if (list_empty(&head->cluster))

@@ -3739,9 +3795,13 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 		ref->in_tree = 0;
 		rb_erase(&ref->rb_node, &delayed_refs->root);
 		delayed_refs->num_entries--;
-		if (head)
-			mutex_unlock(&head->mutex);
 		spin_unlock(&delayed_refs->lock);
+		if (head) {
+			if (pin_bytes)
+				btrfs_pin_extent(root, ref->bytenr,
+						 ref->num_bytes, 1);
+			mutex_unlock(&head->mutex);
+		}
 		btrfs_put_delayed_ref(ref);

 		cond_resched();

@@ -3778,24 +3838,49 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)

 	INIT_LIST_HEAD(&splice);

-	spin_lock(&root->fs_info->delalloc_lock);
-	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
+	spin_lock(&root->delalloc_lock);
+	list_splice_init(&root->delalloc_inodes, &splice);

 	while (!list_empty(&splice)) {
-		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
-					 delalloc_inodes);
+		btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
+					       delalloc_inodes);

 		list_del_init(&btrfs_inode->delalloc_inodes);
 		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
 			  &btrfs_inode->runtime_flags);
-		spin_unlock(&root->fs_info->delalloc_lock);
+		spin_unlock(&root->delalloc_lock);

 		btrfs_invalidate_inodes(btrfs_inode->root);

-		spin_lock(&root->fs_info->delalloc_lock);
+		spin_lock(&root->delalloc_lock);
 	}

-	spin_unlock(&root->fs_info->delalloc_lock);
+	spin_unlock(&root->delalloc_lock);
+}
+
+static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_root *root;
+	struct list_head splice;
+
+	INIT_LIST_HEAD(&splice);
+
+	spin_lock(&fs_info->delalloc_root_lock);
+	list_splice_init(&fs_info->delalloc_roots, &splice);
+	while (!list_empty(&splice)) {
+		root = list_first_entry(&splice, struct btrfs_root,
+					delalloc_root);
+		list_del_init(&root->delalloc_root);
+		root = btrfs_grab_fs_root(root);
+		BUG_ON(!root);
+		spin_unlock(&fs_info->delalloc_root_lock);
+
+		btrfs_destroy_delalloc_inodes(root);
+		btrfs_put_fs_root(root);
+
+		spin_lock(&fs_info->delalloc_root_lock);
+	}
+	spin_unlock(&fs_info->delalloc_root_lock);
 }

 static int btrfs_destroy_marked_extents(struct btrfs_root *root,

@@ -3879,19 +3964,14 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
 				cur_trans->dirty_pages.dirty_bytes);

-	/* FIXME: cleanup wait for commit */
-	cur_trans->in_commit = 1;
-	cur_trans->blocked = 1;
+	cur_trans->state = TRANS_STATE_COMMIT_START;
 	wake_up(&root->fs_info->transaction_blocked_wait);

 	btrfs_evict_pending_snapshots(cur_trans);

-	cur_trans->blocked = 0;
+	cur_trans->state = TRANS_STATE_UNBLOCKED;
 	wake_up(&root->fs_info->transaction_wait);

-	cur_trans->commit_done = 1;
-	wake_up(&cur_trans->commit_wait);
-
 	btrfs_destroy_delayed_inodes(root);
 	btrfs_assert_delayed_root_empty(root);

@@ -3900,6 +3980,9 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 	btrfs_destroy_pinned_extent(root,
 				    root->fs_info->pinned_extents);

+	cur_trans->state = TRANS_STATE_COMPLETED;
+	wake_up(&cur_trans->commit_wait);
+
 	/*
 	memset(cur_trans, 0, sizeof(*cur_trans));
 	kmem_cache_free(btrfs_transaction_cachep, cur_trans);

@@ -3915,7 +3998,7 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)

 	spin_lock(&root->fs_info->trans_lock);
 	list_splice_init(&root->fs_info->trans_list, &list);
-	root->fs_info->trans_no_join = 1;
+	root->fs_info->running_transaction = NULL;
 	spin_unlock(&root->fs_info->trans_lock);

 	while (!list_empty(&list)) {

@@ -3923,37 +4006,31 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)

 		btrfs_destroy_ordered_operations(t, root);

-		btrfs_destroy_ordered_extents(root);
+		btrfs_destroy_all_ordered_extents(root->fs_info);

 		btrfs_destroy_delayed_refs(t, root);

-		/* FIXME: cleanup wait for commit */
-		t->in_commit = 1;
-		t->blocked = 1;
+		/*
+		 *  FIXME: cleanup wait for commit
+		 *  We needn't acquire the lock here, because we are during
+		 *  the umount, there is no other task which will change it.
+		 */
+		t->state = TRANS_STATE_COMMIT_START;
 		smp_mb();
 		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
 			wake_up(&root->fs_info->transaction_blocked_wait);

 		btrfs_evict_pending_snapshots(t);

-		t->blocked = 0;
+		t->state = TRANS_STATE_UNBLOCKED;
 		smp_mb();
 		if (waitqueue_active(&root->fs_info->transaction_wait))
 			wake_up(&root->fs_info->transaction_wait);

-		t->commit_done = 1;
-		smp_mb();
-		if (waitqueue_active(&t->commit_wait))
-			wake_up(&t->commit_wait);
-
 		btrfs_destroy_delayed_inodes(root);
 		btrfs_assert_delayed_root_empty(root);

-		btrfs_destroy_delalloc_inodes(root);
-
-		spin_lock(&root->fs_info->trans_lock);
-		root->fs_info->running_transaction = NULL;
-		spin_unlock(&root->fs_info->trans_lock);
+		btrfs_destroy_all_delalloc_inodes(root->fs_info);

 		btrfs_destroy_marked_extents(root, &t->dirty_pages,
 					     EXTENT_DIRTY);

@@ -3961,15 +4038,17 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
 		btrfs_destroy_pinned_extent(root,
 					    root->fs_info->pinned_extents);

+		t->state = TRANS_STATE_COMPLETED;
+		smp_mb();
+		if (waitqueue_active(&t->commit_wait))
+			wake_up(&t->commit_wait);
+
 		atomic_set(&t->use_count, 0);
 		list_del_init(&t->list);
 		memset(t, 0, sizeof(*t));
 		kmem_cache_free(btrfs_transaction_cachep, t);
 	}

 	spin_lock(&root->fs_info->trans_lock);
-	root->fs_info->trans_no_join = 0;
-	spin_unlock(&root->fs_info->trans_lock);
 	mutex_unlock(&root->fs_info->transaction_kthread_mutex);

 	return 0;
fs/btrfs/disk-io.h

@@ -63,14 +63,40 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
 int btrfs_commit_super(struct btrfs_root *root);
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize);
-struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
-					       struct btrfs_key *location);
+struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
+				      struct btrfs_key *location);
+int btrfs_init_fs_root(struct btrfs_root *root);
+int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
+			 struct btrfs_root *root);
 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 					      struct btrfs_key *location);
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
 void btrfs_btree_balance_dirty(struct btrfs_root *root);
 void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
-void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
+void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
+				 struct btrfs_root *root);
+void btrfs_free_fs_root(struct btrfs_root *root);
+
+/*
+ * This function is used to grab the root, and avoid it being freed while we
+ * access it. But it doesn't ensure that the tree is not dropped.
+ *
+ * If you want to ensure the whole tree is safe, you should use
+ *	fs_info->subvol_srcu
+ */
+static inline struct btrfs_root *btrfs_grab_fs_root(struct btrfs_root *root)
+{
+	if (atomic_inc_not_zero(&root->refs))
+		return root;
+	return NULL;
+}
+
+static inline void btrfs_put_fs_root(struct btrfs_root *root)
+{
+	if (atomic_dec_and_test(&root->refs))
+		kfree(root);
+}
+
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
 			  int atomic);
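Aside: the grab/put pair above makes a root's lifetime explicit. A minimal sketch of the intended calling pattern (the caller below is hypothetical, not part of the patch); a reader may only touch a root it successfully grabbed:

    /* Hypothetical caller, mirroring btrfs_destroy_all_delalloc_inodes(). */
    static void visit_root(struct btrfs_root *root)
    {
    	/* returns NULL once the last reference is already gone */
    	root = btrfs_grab_fs_root(root);
    	if (!root)
    		return;

    	/* safe to dereference root here; the tree itself may still be
    	 * dropped unless fs_info->subvol_srcu is also held */

    	btrfs_put_fs_root(root);	/* kfree()s the root on the last put */
    }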
fs/btrfs/export.c

@@ -82,11 +82,6 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
 		goto fail;
 	}

-	if (btrfs_root_refs(&root->root_item) == 0) {
-		err = -ENOENT;
-		goto fail;
-	}
-
 	key.objectid = objectid;
 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
 	key.offset = 0;

fs/btrfs/extent-tree.c

@@ -24,6 +24,7 @@
 #include <linux/kthread.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/percpu_counter.h>
 #include "compat.h"
 #include "hash.h"
 #include "ctree.h"
@@ -2526,6 +2527,51 @@ static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
 	return 0;
 }

+static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
+{
+	u64 num_bytes;
+
+	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
+			     sizeof(struct btrfs_extent_inline_ref));
+	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
+		num_bytes += heads * sizeof(struct btrfs_tree_block_info);
+
+	/*
+	 * We don't ever fill up leaves all the way so multiply by 2 just to be
+	 * closer to what we're really going to want to use.
+	 */
+	return div64_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
+}
+
+int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
+				       struct btrfs_root *root)
+{
+	struct btrfs_block_rsv *global_rsv;
+	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
+	u64 num_bytes;
+	int ret = 0;
+
+	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+	num_heads = heads_to_leaves(root, num_heads);
+	if (num_heads > 1)
+		num_bytes += (num_heads - 1) * root->leafsize;
+	num_bytes <<= 1;
+	global_rsv = &root->fs_info->global_block_rsv;
+
+	/*
+	 * If we can't allocate any more chunks lets make sure we have _lots_ of
+	 * wiggle room since running delayed refs can create more delayed refs.
+	 */
+	if (global_rsv->space_info->full)
+		num_bytes <<= 1;
+
+	spin_lock(&global_rsv->lock);
+	if (global_rsv->reserved <= num_bytes)
+		ret = 1;
+	spin_unlock(&global_rsv->lock);
+	return ret;
+}
+
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far.  count can be
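Aside: to make the new throttling heuristic concrete, a rough worked example (every number below is assumed for illustration, not taken from the patch):

    /*
     * 4096-byte leaves, SKINNY_METADATA set, 1000 heads ready:
     *
     *   per-head bytes  = sizeof(btrfs_extent_item)
     *                   + sizeof(btrfs_extent_inline_ref) ~= 24 + 9 = 33
     *   heads_to_leaves = 1000 * 33 / BTRFS_LEAF_DATA_SIZE (~3995) ~= 8
     *   num_bytes       = one item's metadata size + 7 * 4096, then doubled
     *                     (doubled again if no new chunks can be allocated)
     *
     * Throttling starts once global_rsv->reserved falls to that figure.
     */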
@@ -2573,7 +2619,8 @@ progress:
 	old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
 	if (old) {
 		DEFINE_WAIT(__wait);
-		if (delayed_refs->num_entries < 16348)
+		if (delayed_refs->flushing ||
+		    !btrfs_should_throttle_delayed_refs(trans, root))
 			return 0;

 		prepare_to_wait(&delayed_refs->wait, &__wait,

@@ -2608,7 +2655,7 @@ again:

 	while (1) {
 		if (!(run_all || run_most) &&
-		    delayed_refs->num_heads_ready < 64)
+		    !btrfs_should_throttle_delayed_refs(trans, root))
 			break;

 		/*

@@ -2629,6 +2676,7 @@ again:
 			spin_unlock(&delayed_refs->lock);
 			btrfs_abort_transaction(trans, root, ret);
 			atomic_dec(&delayed_refs->procs_running_refs);
+			wake_up(&delayed_refs->wait);
 			return ret;
 		}

@@ -3310,6 +3358,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	struct btrfs_space_info *found;
 	int i;
 	int factor;
+	int ret;

 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
 		     BTRFS_BLOCK_GROUP_RAID10))
@@ -3333,6 +3382,12 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	if (!found)
 		return -ENOMEM;

+	ret = percpu_counter_init(&found->total_bytes_pinned, 0);
+	if (ret) {
+		kfree(found);
+		return ret;
+	}
+
 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
 		INIT_LIST_HEAD(&found->block_groups[i]);
 	init_rwsem(&found->groups_sem);
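Aside: total_bytes_pinned is a percpu_counter rather than a plain u64 under the space_info lock, which keeps pin/unpin accounting cheap on large machines. A minimal sketch of the counter API this series relies on (calls as in <linux/percpu_counter.h>; the surrounding function is illustrative only):

    #include <linux/percpu_counter.h>

    static struct percpu_counter total_bytes_pinned;

    static int pinned_example(u64 num_bytes)
    {
    	int ret = percpu_counter_init(&total_bytes_pinned, 0);

    	if (ret)
    		return ret;	/* -ENOMEM, as handled in update_space_info() */

    	percpu_counter_add(&total_bytes_pinned, 4096);	/* pin one block */
    	percpu_counter_add(&total_bytes_pinned, -4096);	/* unpin it again */

    	/* compare() is >= 0 iff the summed counter is at least num_bytes */
    	ret = percpu_counter_compare(&total_bytes_pinned, num_bytes) >= 0;

    	percpu_counter_destroy(&total_bytes_pinned);
    	return ret;
    }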
@@ -3565,10 +3620,11 @@ alloc:
 		}

 		/*
-		 * If we have less pinned bytes than we want to allocate then
-		 * don't bother committing the transaction, it won't help us.
+		 * If we don't have enough pinned space to deal with this
+		 * allocation don't bother committing the transaction.
 		 */
-		if (data_sinfo->bytes_pinned < bytes)
+		if (percpu_counter_compare(&data_sinfo->total_bytes_pinned,
+					   bytes) < 0)
 			committed = 1;
 		spin_unlock(&data_sinfo->lock);

@@ -3577,6 +3633,7 @@ commit_trans:
 	if (!committed &&
 	    !atomic_read(&root->fs_info->open_ioctl_trans)) {
 		committed = 1;
+
 		trans = btrfs_join_transaction(root);
 		if (IS_ERR(trans))
 			return PTR_ERR(trans);

@@ -3609,6 +3666,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)

 	data_sinfo = root->fs_info->data_sinfo;
 	spin_lock(&data_sinfo->lock);
+	WARN_ON(data_sinfo->bytes_may_use < bytes);
 	data_sinfo->bytes_may_use -= bytes;
 	trace_btrfs_space_reservation(root->fs_info, "space_info",
 				      data_sinfo->flags, bytes, 0);

@@ -3886,12 +3944,11 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
 					 unsigned long nr_pages)
 {
 	struct super_block *sb = root->fs_info->sb;
-	int started;

-	/* If we can not start writeback, just sync all the delalloc file. */
-	started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
-						WB_REASON_FS_FREE_SPACE);
-	if (!started) {
+	if (down_read_trylock(&sb->s_umount)) {
+		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
+		up_read(&sb->s_umount);
+	} else {
 		/*
 		 * We needn't worry the filesystem going from r/w to r/o though
 		 * we don't acquire ->s_umount mutex, because the filesystem

@@ -3899,9 +3956,9 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
 		 * the filesystem is readonly(all dirty pages are written to
 		 * the disk).
 		 */
-		btrfs_start_delalloc_inodes(root, 0);
+		btrfs_start_all_delalloc_inodes(root->fs_info, 0);
 		if (!current->journal_info)
-			btrfs_wait_ordered_extents(root, 0);
+			btrfs_wait_all_ordered_extents(root->fs_info, 0);
 	}
 }

@@ -3931,7 +3988,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
 	if (delalloc_bytes == 0) {
 		if (trans)
 			return;
-		btrfs_wait_ordered_extents(root, 0);
+		btrfs_wait_all_ordered_extents(root->fs_info, 0);
 		return;
 	}

@@ -3959,7 +4016,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,

 		loops++;
 		if (wait_ordered && !trans) {
-			btrfs_wait_ordered_extents(root, 0);
+			btrfs_wait_all_ordered_extents(root->fs_info, 0);
 		} else {
 			time_left = schedule_timeout_killable(1);
 			if (time_left)

@@ -3997,7 +4054,8 @@ static int may_commit_transaction(struct btrfs_root *root,

 	/* See if there is enough pinned space to make this reservation */
 	spin_lock(&space_info->lock);
-	if (space_info->bytes_pinned >= bytes) {
+	if (percpu_counter_compare(&space_info->total_bytes_pinned,
+				   bytes) >= 0) {
 		spin_unlock(&space_info->lock);
 		goto commit;
 	}

@@ -4012,7 +4070,8 @@ static int may_commit_transaction(struct btrfs_root *root,

 	spin_lock(&space_info->lock);
 	spin_lock(&delayed_rsv->lock);
-	if (space_info->bytes_pinned + delayed_rsv->size < bytes) {
+	if (percpu_counter_compare(&space_info->total_bytes_pinned,
+				   bytes - delayed_rsv->size) >= 0) {
 		spin_unlock(&delayed_rsv->lock);
 		spin_unlock(&space_info->lock);
 		return -ENOSPC;
@@ -4297,6 +4356,31 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
 	spin_unlock(&block_rsv->lock);
 }

+int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
+			     struct btrfs_block_rsv *dest, u64 num_bytes,
+			     int min_factor)
+{
+	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
+	u64 min_bytes;
+
+	if (global_rsv->space_info != dest->space_info)
+		return -ENOSPC;
+
+	spin_lock(&global_rsv->lock);
+	min_bytes = div_factor(global_rsv->size, min_factor);
+	if (global_rsv->reserved < min_bytes + num_bytes) {
+		spin_unlock(&global_rsv->lock);
+		return -ENOSPC;
+	}
+	global_rsv->reserved -= num_bytes;
+	if (global_rsv->reserved < global_rsv->size)
+		global_rsv->full = 0;
+	spin_unlock(&global_rsv->lock);
+
+	block_rsv_add_bytes(dest, num_bytes, 1);
+	return 0;
+}
+
 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
 				    struct btrfs_block_rsv *block_rsv,
 				    struct btrfs_block_rsv *dest, u64 num_bytes)
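Aside: the min_factor check keeps a floor under the global reserve; with assumed numbers (div_factor(size, f) computes size * f / 10):

    /*
     * global_rsv->size = 96 MiB, min_factor = 5  ->  floor = 48 MiB
     *
     *   reserved = 50 MiB, num_bytes = 4 MiB: 50 < 48 + 4  -> -ENOSPC
     *   reserved = 60 MiB, num_bytes = 4 MiB: 60 >= 52     -> migrate 4 MiB
     */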
@@ -5030,14 +5114,14 @@ static int update_block_group(struct btrfs_root *root,
 	int factor;

 	/* block accounting for super block */
-	spin_lock(&info->delalloc_lock);
+	spin_lock(&info->delalloc_root_lock);
 	old_val = btrfs_super_bytes_used(info->super_copy);
 	if (alloc)
 		old_val += num_bytes;
 	else
 		old_val -= num_bytes;
 	btrfs_set_super_bytes_used(info->super_copy, old_val);
-	spin_unlock(&info->delalloc_lock);
+	spin_unlock(&info->delalloc_root_lock);

 	while (total) {
 		cache = btrfs_lookup_block_group(info, bytenr);

@@ -5189,6 +5273,80 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
 	return ret;
 }

+static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
+{
+	int ret;
+	struct btrfs_block_group_cache *block_group;
+	struct btrfs_caching_control *caching_ctl;
+
+	block_group = btrfs_lookup_block_group(root->fs_info, start);
+	if (!block_group)
+		return -EINVAL;
+
+	cache_block_group(block_group, 0);
+	caching_ctl = get_caching_control(block_group);
+
+	if (!caching_ctl) {
+		/* Logic error */
+		BUG_ON(!block_group_cache_done(block_group));
+		ret = btrfs_remove_free_space(block_group, start, num_bytes);
+	} else {
+		mutex_lock(&caching_ctl->mutex);
+
+		if (start >= caching_ctl->progress) {
+			ret = add_excluded_extent(root, start, num_bytes);
+		} else if (start + num_bytes <= caching_ctl->progress) {
+			ret = btrfs_remove_free_space(block_group,
+						      start, num_bytes);
+		} else {
+			num_bytes = caching_ctl->progress - start;
+			ret = btrfs_remove_free_space(block_group,
+						      start, num_bytes);
+			if (ret)
+				goto out_lock;
+
+			num_bytes = (start + num_bytes) -
+				    caching_ctl->progress;
+			start = caching_ctl->progress;
+			ret = add_excluded_extent(root, start, num_bytes);
+		}
+out_lock:
+		mutex_unlock(&caching_ctl->mutex);
+		put_caching_control(caching_ctl);
+	}
+	btrfs_put_block_group(block_group);
+	return ret;
+}
+
+int btrfs_exclude_logged_extents(struct btrfs_root *log,
+				 struct extent_buffer *eb)
+{
+	struct btrfs_file_extent_item *item;
+	struct btrfs_key key;
+	int found_type;
+	int i;
+
+	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
+		return 0;
+
+	for (i = 0; i < btrfs_header_nritems(eb); i++) {
+		btrfs_item_key_to_cpu(eb, &key, i);
+		if (key.type != BTRFS_EXTENT_DATA_KEY)
+			continue;
+		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
+		found_type = btrfs_file_extent_type(eb, item);
+		if (found_type == BTRFS_FILE_EXTENT_INLINE)
+			continue;
+		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
+			continue;
+		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
+		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
+		__exclude_logged_extent(log, key.objectid, key.offset);
+	}
+
+	return 0;
+}
+
 /**
  * btrfs_update_reserved_bytes - update the block_group and space info counters
  * @cache:	The cache we are manipulating

@@ -5251,6 +5409,7 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 	struct btrfs_caching_control *next;
 	struct btrfs_caching_control *caching_ctl;
 	struct btrfs_block_group_cache *cache;
+	struct btrfs_space_info *space_info;

 	down_write(&fs_info->extent_commit_sem);

@@ -5273,6 +5432,9 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,

 	up_write(&fs_info->extent_commit_sem);

+	list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
+		percpu_counter_set(&space_info->total_bytes_pinned, 0);
+
 	update_global_block_rsv(fs_info);
 }
@@ -5370,6 +5532,27 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 	return 0;
 }

+static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
+			     u64 owner, u64 root_objectid)
+{
+	struct btrfs_space_info *space_info;
+	u64 flags;
+
+	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
+			flags = BTRFS_BLOCK_GROUP_SYSTEM;
+		else
+			flags = BTRFS_BLOCK_GROUP_METADATA;
+	} else {
+		flags = BTRFS_BLOCK_GROUP_DATA;
+	}
+
+	space_info = __find_space_info(fs_info, flags);
+	BUG_ON(!space_info); /* Logic bug */
+	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
+}
+
+
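Aside: although num_bytes is declared u64, callers below pass negative deltas, e.g. add_pinned_bytes(root->fs_info, -num_bytes, ...) in __btrfs_free_extent. percpu_counter_add() takes an s64, so the wrapped value acts as a signed decrement; a minimal illustration (values assumed):

    u64 num_bytes = 4096;

    /* -num_bytes wraps around as u64, but percpu_counter_add()'s s64
     * parameter reinterprets it, subtracting 4096 from the counter. */
    percpu_counter_add(&space_info->total_bytes_pinned, -num_bytes);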
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			       struct btrfs_root *root,
 			       u64 bytenr, u64 num_bytes, u64 parent,

@@ -5590,6 +5773,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 				goto out;
 			}
 		}
+		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
+				 root_objectid);
 	} else {
 		if (found_extent) {
 			BUG_ON(is_data && refs_to_drop !=

@@ -5713,6 +5898,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 			   u64 parent, int last_ref)
 {
 	struct btrfs_block_group_cache *cache = NULL;
+	int pin = 1;
 	int ret;

 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {

@@ -5745,8 +5931,14 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,

 		btrfs_add_free_space(cache, buf->start, buf->len);
 		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
+		pin = 0;
 	}
 out:
+	if (pin)
+		add_pinned_bytes(root->fs_info, buf->len,
+				 btrfs_header_level(buf),
+				 root->root_key.objectid);
+
 	/*
 	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
 	 * anymore.

@@ -5763,6 +5955,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	int ret;
 	struct btrfs_fs_info *fs_info = root->fs_info;

+	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
+
 	/*
 	 * tree log blocks never actually go into the extent allocation
 	 * tree, just update pinning info and exit early.
@@ -6560,52 +6754,26 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 {
 	int ret;
 	struct btrfs_block_group_cache *block_group;
-	struct btrfs_caching_control *caching_ctl;
-	u64 start = ins->objectid;
-	u64 num_bytes = ins->offset;
+
+	/*
+	 * Mixed block groups will exclude before processing the log so we only
+	 * need to do the exclude dance if this fs isn't mixed.
+	 */
+	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
+		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
+		if (ret)
+			return ret;
+	}

 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
-	cache_block_group(block_group, 0);
-	caching_ctl = get_caching_control(block_group);
-
-	if (!caching_ctl) {
-		BUG_ON(!block_group_cache_done(block_group));
-		ret = btrfs_remove_free_space(block_group, start, num_bytes);
-		if (ret)
-			goto out;
-	} else {
-		mutex_lock(&caching_ctl->mutex);
-
-		if (start >= caching_ctl->progress) {
-			ret = add_excluded_extent(root, start, num_bytes);
-		} else if (start + num_bytes <= caching_ctl->progress) {
-			ret = btrfs_remove_free_space(block_group,
-						      start, num_bytes);
-		} else {
-			num_bytes = caching_ctl->progress - start;
-			ret = btrfs_remove_free_space(block_group,
-						      start, num_bytes);
-			if (ret)
-				goto out_lock;
-
-			start = caching_ctl->progress;
-			num_bytes = ins->objectid + ins->offset -
-				    caching_ctl->progress;
-			ret = add_excluded_extent(root, start, num_bytes);
-		}
-out_lock:
-		mutex_unlock(&caching_ctl->mutex);
-		put_caching_control(caching_ctl);
-		if (ret)
-			goto out;
-	}
+	if (!block_group)
+		return -EINVAL;

 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
 					  RESERVE_ALLOC_NO_ACCOUNT);
 	BUG_ON(ret); /* logic error */
 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
 					 0, owner, offset, ins, 1);
-out:
 	btrfs_put_block_group(block_group);
 	return ret;
 }
@@ -7384,7 +7552,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);

 	while (1) {
-		if (!for_reloc && btrfs_fs_closing(root->fs_info)) {
+		if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
 			pr_debug("btrfs: drop snapshot early exit\n");
 			err = -EAGAIN;
 			goto out_end_trans;

@@ -7447,8 +7615,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	}

 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
-		ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
-					   NULL, NULL);
+		ret = btrfs_find_root(tree_root, &root->root_key, path,
+				      NULL, NULL);
 		if (ret < 0) {
 			btrfs_abort_transaction(trans, tree_root, ret);
 			err = ret;

@@ -7465,11 +7633,11 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
 	}

 	if (root->in_radix) {
-		btrfs_free_fs_root(tree_root->fs_info, root);
+		btrfs_drop_and_free_fs_root(tree_root->fs_info, root);
 	} else {
 		free_extent_buffer(root->node);
 		free_extent_buffer(root->commit_root);
-		kfree(root);
+		btrfs_put_fs_root(root);
 	}
 out_end_trans:
 	btrfs_end_transaction_throttle(trans, tree_root);

@@ -7782,6 +7950,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 	struct btrfs_space_info *space_info;
 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 	struct btrfs_device *device;
+	struct btrfs_trans_handle *trans;
 	u64 min_free;
 	u64 dev_min = 1;
 	u64 dev_nr = 0;

@@ -7868,6 +8037,13 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		do_div(min_free, dev_min);
 	}

+	/* We need to do this so that we can look at pending chunks */
+	trans = btrfs_join_transaction(root);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto out;
+	}
+
 	mutex_lock(&root->fs_info->chunk_mutex);
 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
 		u64 dev_offset;

@@ -7878,7 +8054,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		 */
 		if (device->total_bytes > device->bytes_used + min_free &&
 		    !device->is_tgtdev_for_dev_replace) {
-			ret = find_free_dev_extent(device, min_free,
+			ret = find_free_dev_extent(trans, device, min_free,
 						   &dev_offset, NULL);
 			if (!ret)
 				dev_nr++;

@@ -7890,6 +8066,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
 		}
 	}
 	mutex_unlock(&root->fs_info->chunk_mutex);
+	btrfs_end_transaction(trans, root);
 out:
 	btrfs_put_block_group(block_group);
 	return ret;
@@ -8032,6 +8209,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 				dump_space_info(space_info, 0, 0);
 			}
 		}
+		percpu_counter_destroy(&space_info->total_bytes_pinned);
 		list_del(&space_info->list);
 		kfree(space_info);
 	}

@@ -8254,6 +8432,10 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 					       sizeof(item));
 		if (ret)
 			btrfs_abort_transaction(trans, extent_root, ret);
+		ret = btrfs_finish_chunk_alloc(trans, extent_root,
+					       key.objectid, key.offset);
+		if (ret)
+			btrfs_abort_transaction(trans, extent_root, ret);
 	}
 }

@@ -8591,8 +8773,15 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
 		if (end - start >= range->minlen) {
 			if (!block_group_cache_done(cache)) {
 				ret = cache_block_group(cache, 0);
-				if (!ret)
-					wait_block_group_cache_done(cache);
+				if (ret) {
+					btrfs_put_block_group(cache);
+					break;
+				}
+				ret = wait_block_group_cache_done(cache);
+				if (ret) {
+					btrfs_put_block_group(cache);
+					break;
+				}
 			}
 			ret = btrfs_trim_block_group(cache,
 						     &group_trimmed,
fs/btrfs/extent_io.c

@@ -77,10 +77,29 @@ void btrfs_leak_debug_check(void)
 		kmem_cache_free(extent_buffer_cache, eb);
 	}
 }
+
+#define btrfs_debug_check_extent_io_range(inode, start, end)		\
+	__btrfs_debug_check_extent_io_range(__func__, (inode), (start), (end))
+static inline void __btrfs_debug_check_extent_io_range(const char *caller,
+		struct inode *inode, u64 start, u64 end)
+{
+	u64 isize = i_size_read(inode);
+
+	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
+		printk_ratelimited(KERN_DEBUG
+		    "btrfs: %s: ino %llu isize %llu odd range [%llu,%llu]\n",
+				caller,
+				(unsigned long long)btrfs_ino(inode),
+				(unsigned long long)isize,
+				(unsigned long long)start,
+				(unsigned long long)end);
+	}
+}
 #else
 #define btrfs_leak_debug_add(new, head)	do {} while (0)
 #define btrfs_leak_debug_del(entry)	do {} while (0)
 #define btrfs_leak_debug_check()	do {} while (0)
+#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
 #endif

 #define BUFFER_LRU_MAX 64

@@ -522,6 +541,11 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	int err;
 	int clear = 0;

+	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+
+	if (bits & EXTENT_DELALLOC)
+		bits |= EXTENT_NORESERVE;
+
 	if (delete)
 		bits |= ~EXTENT_CTLBITS;
 	bits |= EXTENT_FIRST_DELALLOC;

@@ -677,6 +701,8 @@ static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	struct extent_state *state;
 	struct rb_node *node;

+	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+
 	spin_lock(&tree->lock);
again:
 	while (1) {

@@ -769,6 +795,8 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	u64 last_start;
 	u64 last_end;

+	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+
 	bits |= EXTENT_FIRST_DELALLOC;
again:
 	if (!prealloc && (mask & __GFP_WAIT)) {

@@ -989,6 +1017,8 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	u64 last_start;
 	u64 last_end;

+	btrfs_debug_check_extent_io_range(tree->mapping->host, start, end);
+
again:
 	if (!prealloc && (mask & __GFP_WAIT)) {
 		prealloc = alloc_extent_state(mask);

@@ -2450,11 +2480,12 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		struct extent_state *cached = NULL;
 		struct extent_state *state;
 		struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+		struct inode *inode = page->mapping->host;

 		pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
 			 "mirror=%lu\n", (u64)bio->bi_sector, err,
 			 io_bio->mirror_num);
-		tree = &BTRFS_I(page->mapping->host)->io_tree;
+		tree = &BTRFS_I(inode)->io_tree;

 		/* We always issue full-page reads, but if some block
 		 * in a page fails to read, blk_update_request() will

@@ -2528,6 +2559,14 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
 		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);

 		if (uptodate) {
+			loff_t i_size = i_size_read(inode);
+			pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+			unsigned offset;
+
+			/* Zero out the end if this page straddles i_size */
+			offset = i_size & (PAGE_CACHE_SIZE-1);
+			if (page->index == end_index && offset)
+				zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 			SetPageUptodate(page);
 		} else {
 			ClearPageUptodate(page);

fs/btrfs/extent_io.h

@@ -19,6 +19,7 @@
 #define EXTENT_FIRST_DELALLOC (1 << 12)
 #define EXTENT_NEED_WAIT (1 << 13)
 #define EXTENT_DAMAGED (1 << 14)
+#define EXTENT_NORESERVE (1 << 15)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
fs/btrfs/file-item.c

@@ -34,8 +34,7 @@

 #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
 				   sizeof(struct btrfs_ordered_sum)) / \
-				   sizeof(struct btrfs_sector_sum) * \
-				   (r)->sectorsize - (r)->sectorsize)
+				   sizeof(u32) * (r)->sectorsize)

 int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root,
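Aside: rough arithmetic behind the new macro, with every size assumed for illustration (4 KiB pages and sectors, sizeof(struct btrfs_ordered_sum) around 40 bytes):

    /*
     *   (4096 - 40) / sizeof(u32)  = 1014 checksums per allocation
     *   1014 * 4096               ~= 4 MiB of file data per ordered sum
     *
     * versus one 16-byte btrfs_sector_sum per sector before this change.
     */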
@@ -297,7 +296,6 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
 	struct btrfs_ordered_sum *sums;
-	struct btrfs_sector_sum *sector_sum;
 	struct btrfs_csum_item *item;
 	LIST_HEAD(tmplist);
 	unsigned long offset;

@@ -368,34 +366,28 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 				      struct btrfs_csum_item);
 		while (start < csum_end) {
 			size = min_t(size_t, csum_end - start,
-					MAX_ORDERED_SUM_BYTES(root));
+				     MAX_ORDERED_SUM_BYTES(root));
 			sums = kzalloc(btrfs_ordered_sum_size(root, size),
-					GFP_NOFS);
+				       GFP_NOFS);
 			if (!sums) {
 				ret = -ENOMEM;
 				goto fail;
 			}

-			sector_sum = sums->sums;
 			sums->bytenr = start;
-			sums->len = size;
+			sums->len = (int)size;

 			offset = (start - key.offset) >>
 				root->fs_info->sb->s_blocksize_bits;
 			offset *= csum_size;
+			size >>= root->fs_info->sb->s_blocksize_bits;

-			while (size > 0) {
-				read_extent_buffer(path->nodes[0],
-						&sector_sum->sum,
-						((unsigned long)item) +
-						offset, csum_size);
-				sector_sum->bytenr = start;
-
-				size -= root->sectorsize;
-				start += root->sectorsize;
-				offset += csum_size;
-				sector_sum++;
-			}
+			read_extent_buffer(path->nodes[0],
+					   sums->sums,
+					   ((unsigned long)item) + offset,
+					   csum_size * size);
+
+			start += root->sectorsize * size;
 			list_add_tail(&sums->list, &tmplist);
 		}
 		path->slots[0]++;

@@ -417,23 +409,20 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 			struct bio *bio, u64 file_start, int contig)
 {
 	struct btrfs_ordered_sum *sums;
-	struct btrfs_sector_sum *sector_sum;
 	struct btrfs_ordered_extent *ordered;
 	char *data;
 	struct bio_vec *bvec = bio->bi_io_vec;
 	int bio_index = 0;
+	int index;
 	unsigned long total_bytes = 0;
 	unsigned long this_sum_bytes = 0;
 	u64 offset;
-	u64 disk_bytenr;

 	WARN_ON(bio->bi_vcnt <= 0);
 	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS);
 	if (!sums)
 		return -ENOMEM;

-	sector_sum = sums->sums;
-	disk_bytenr = (u64)bio->bi_sector << 9;
 	sums->len = bio->bi_size;
 	INIT_LIST_HEAD(&sums->list);

@@ -444,7 +433,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,

 	ordered = btrfs_lookup_ordered_extent(inode, offset);
 	BUG_ON(!ordered); /* Logic error */
-	sums->bytenr = ordered->start;
+	sums->bytenr = (u64)bio->bi_sector << 9;
+	index = 0;

 	while (bio_index < bio->bi_vcnt) {
 		if (!contig)

@@ -463,28 +453,27 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
 				       GFP_NOFS);
 			BUG_ON(!sums); /* -ENOMEM */
-			sector_sum = sums->sums;
 			sums->len = bytes_left;
 			ordered = btrfs_lookup_ordered_extent(inode, offset);
 			BUG_ON(!ordered); /* Logic error */
-			sums->bytenr = ordered->start;
+			sums->bytenr = ((u64)bio->bi_sector << 9) +
+				       total_bytes;
+			index = 0;
 		}

 		data = kmap_atomic(bvec->bv_page);
-		sector_sum->sum = ~(u32)0;
-		sector_sum->sum = btrfs_csum_data(data + bvec->bv_offset,
-						  sector_sum->sum,
-						  bvec->bv_len);
+		sums->sums[index] = ~(u32)0;
+		sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
+						    sums->sums[index],
+						    bvec->bv_len);
 		kunmap_atomic(data);
-		btrfs_csum_final(sector_sum->sum,
-				 (char *)&sector_sum->sum);
-		sector_sum->bytenr = disk_bytenr;
+		btrfs_csum_final(sums->sums[index],
+				 (char *)(sums->sums + index));

-		sector_sum++;
 		bio_index++;
+		index++;
 		total_bytes += bvec->bv_len;
 		this_sum_bytes += bvec->bv_len;
-		disk_bytenr += bvec->bv_len;
 		offset += bvec->bv_len;
 		bvec++;
 	}
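Aside: after this conversion struct btrfs_ordered_sum carries a bare u32 checksum array instead of a list of btrfs_sector_sum entries, and the per-sector bytenr is implied by the array index. A sketch of the resulting layout and size helper (paraphrased from ordered-data.h in this series; treat the exact field order as illustrative):

    struct btrfs_ordered_sum {
    	u64 bytenr;		/* start of the extent on disk */
    	int len;		/* bytes of data covered by sums[] */
    	struct list_head list;
    	u32 sums[];		/* sums[i] covers bytenr + i * sectorsize */
    };

    static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
    					 unsigned long bytes)
    {
    	int num_sectors = (int)DIV_ROUND_UP(bytes, root->sectorsize);

    	return sizeof(struct btrfs_ordered_sum) + num_sectors * sizeof(u32);
    }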
@@ -672,62 +661,46 @@ out:
 	return ret;
 }

-static u64 btrfs_sector_sum_left(struct btrfs_ordered_sum *sums,
-				 struct btrfs_sector_sum *sector_sum,
-				 u64 total_bytes, u64 sectorsize)
-{
-	u64 tmp = sectorsize;
-	u64 next_sector = sector_sum->bytenr;
-	struct btrfs_sector_sum *next = sector_sum + 1;
-
-	while ((tmp + total_bytes) < sums->len) {
-		if (next_sector + sectorsize != next->bytenr)
-			break;
-		tmp += sectorsize;
-		next_sector = next->bytenr;
-		next++;
-	}
-	return tmp;
-}
-
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root,
 			   struct btrfs_ordered_sum *sums)
 {
-	u64 bytenr;
-	int ret;
 	struct btrfs_key file_key;
 	struct btrfs_key found_key;
-	u64 next_offset;
-	u64 total_bytes = 0;
-	int found_next;
 	struct btrfs_path *path;
 	struct btrfs_csum_item *item;
 	struct btrfs_csum_item *item_end;
 	struct extent_buffer *leaf = NULL;
+	u64 next_offset;
+	u64 total_bytes = 0;
 	u64 csum_offset;
-	struct btrfs_sector_sum *sector_sum;
+	u64 bytenr;
 	u32 nritems;
 	u32 ins_size;
+	int index = 0;
+	int found_next;
+	int ret;
 	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
-
-	sector_sum = sums->sums;
again:
 	next_offset = (u64)-1;
 	found_next = 0;
+	bytenr = sums->bytenr + total_bytes;
 	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
-	file_key.offset = sector_sum->bytenr;
-	bytenr = sector_sum->bytenr;
+	file_key.offset = bytenr;
 	btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);

-	item = btrfs_lookup_csum(trans, root, path, sector_sum->bytenr, 1);
+	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
 	if (!IS_ERR(item)) {
-		leaf = path->nodes[0];
 		ret = 0;
+		leaf = path->nodes[0];
+		item_end = btrfs_item_ptr(leaf, path->slots[0],
+					  struct btrfs_csum_item);
+		item_end = (struct btrfs_csum_item *)((char *)item_end +
+			   btrfs_item_size_nr(leaf, path->slots[0]));
 		goto found;
 	}
 	ret = PTR_ERR(item);

@@ -807,8 +780,7 @@ again:

 	free_space = btrfs_leaf_free_space(root, leaf) -
 		     sizeof(struct btrfs_item) - csum_size;
-	tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
-				    root->sectorsize);
+	tmp = sums->len - total_bytes;
 	tmp >>= root->fs_info->sb->s_blocksize_bits;
 	WARN_ON(tmp < 1);

@@ -822,6 +794,7 @@ again:
 	diff *= csum_size;

 	btrfs_extend_item(root, path, diff);
+	ret = 0;
 	goto csum;
 }

@@ -831,8 +804,7 @@ insert:
 	if (found_next) {
 		u64 tmp;

-		tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
-					    root->sectorsize);
+		tmp = sums->len - total_bytes;
 		tmp >>= root->fs_info->sb->s_blocksize_bits;
 		tmp = min(tmp, (next_offset - file_key.offset) >>
 			       root->fs_info->sb->s_blocksize_bits);

@@ -853,31 +825,25 @@ insert:
 		WARN_ON(1);
 		goto fail_unlock;
 	}
-csum:
 	leaf = path->nodes[0];
+csum:
 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
-	ret = 0;
+	item_end = (struct btrfs_csum_item *)((char *)item +
+		   btrfs_item_size_nr(leaf, path->slots[0]));
 	item = (struct btrfs_csum_item *)((unsigned char *)item +
 					  csum_offset * csum_size);
found:
-	item_end = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
-	item_end = (struct btrfs_csum_item *)((unsigned char *)item_end +
-				      btrfs_item_size_nr(leaf, path->slots[0]));
-next_sector:
+	ins_size = (u32)(sums->len - total_bytes) >>
+		   root->fs_info->sb->s_blocksize_bits;
+	ins_size *= csum_size;
+	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
+			 ins_size);
+	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
+			    ins_size);

-	write_extent_buffer(leaf, &sector_sum->sum, (unsigned long)item, csum_size);
-
-	total_bytes += root->sectorsize;
-	sector_sum++;
-	if (total_bytes < sums->len) {
-		item = (struct btrfs_csum_item *)((char *)item +
-						  csum_size);
-		if (item < item_end && bytenr + PAGE_CACHE_SIZE ==
-		    sector_sum->bytenr) {
-			bytenr = sector_sum->bytenr;
-			goto next_sector;
-		}
-	}
+	ins_size /= csum_size;
+	total_bytes += ins_size * root->sectorsize;
+	index += ins_size;

 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	if (total_bytes < sums->len) {
fs/btrfs/file.c
@@ -309,10 +309,6 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 		ret = PTR_ERR(inode_root);
 		goto cleanup;
 	}
-	if (btrfs_root_refs(&inode_root->root_item) == 0) {
-		ret = -ENOENT;
-		goto cleanup;
-	}

 	key.objectid = defrag->ino;
 	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);

@@ -1317,6 +1313,56 @@ fail:

 }

+static noinline int check_can_nocow(struct inode *inode, loff_t pos,
+				    size_t *write_bytes)
+{
+	struct btrfs_trans_handle *trans;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_ordered_extent *ordered;
+	u64 lockstart, lockend;
+	u64 num_bytes;
+	int ret;
+
+	lockstart = round_down(pos, root->sectorsize);
+	lockend = lockstart + round_up(*write_bytes, root->sectorsize) - 1;
+
+	while (1) {
+		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+		ordered = btrfs_lookup_ordered_range(inode, lockstart,
+						     lockend - lockstart + 1);
+		if (!ordered) {
+			break;
+		}
+		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+		btrfs_start_ordered_extent(inode, ordered, 1);
+		btrfs_put_ordered_extent(ordered);
+	}
+
+	trans = btrfs_join_transaction(root);
+	if (IS_ERR(trans)) {
+		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+		return PTR_ERR(trans);
+	}
+
+	num_bytes = lockend - lockstart + 1;
+	ret = can_nocow_extent(trans, inode, lockstart, &num_bytes, NULL, NULL,
+			       NULL);
+	btrfs_end_transaction(trans, root);
+	if (ret <= 0) {
+		ret = 0;
+	} else {
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
+				 EXTENT_DIRTY | EXTENT_DELALLOC |
+				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
+				 NULL, GFP_NOFS);
+		*write_bytes = min_t(size_t, *write_bytes, num_bytes);
+	}
+
+	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
+
+	return ret;
+}
+
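Aside: check_can_nocow() rounds the locked range out to sector boundaries; a quick worked example with an assumed 4096-byte sectorsize:

    /*
     * pos = 5000, *write_bytes = 100:
     *
     *   lockstart = round_down(5000, 4096)          = 4096
     *   lockend   = 4096 + round_up(100, 4096) - 1  = 8191
     *
     * i.e. exactly the one sector the write touches.
     */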
 static noinline ssize_t __btrfs_buffered_write(struct file *file,
 					       struct iov_iter *i,
 					       loff_t pos)

@@ -1324,10 +1370,12 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	struct inode *inode = file_inode(file);
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct page **pages = NULL;
+	u64 release_bytes = 0;
 	unsigned long first_index;
 	size_t num_written = 0;
 	int nrptrs;
 	int ret = 0;
+	bool only_release_metadata = false;
 	bool force_page_uptodate = false;

 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /

@@ -1348,6 +1396,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			       offset);
 		size_t num_pages = (write_bytes + offset +
 				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+		size_t reserve_bytes;
 		size_t dirty_pages;
 		size_t copied;
@@ -1362,11 +1411,41 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 			break;
 		}

-		ret = btrfs_delalloc_reserve_space(inode,
-					num_pages << PAGE_CACHE_SHIFT);
+		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+		ret = btrfs_check_data_free_space(inode, reserve_bytes);
+		if (ret == -ENOSPC &&
+		    (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+					      BTRFS_INODE_PREALLOC))) {
+			ret = check_can_nocow(inode, pos, &write_bytes);
+			if (ret > 0) {
+				only_release_metadata = true;
+				/*
+				 * our prealloc extent may be smaller than
+				 * write_bytes, so scale down.
+				 */
+				num_pages = (write_bytes + offset +
+					     PAGE_CACHE_SIZE - 1) >>
+					PAGE_CACHE_SHIFT;
+				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+				ret = 0;
+			} else {
+				ret = -ENOSPC;
+			}
+		}
+
 		if (ret)
 			break;

+		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
+		if (ret) {
+			if (!only_release_metadata)
+				btrfs_free_reserved_data_space(inode,
+							       reserve_bytes);
+			break;
+		}
+
+		release_bytes = reserve_bytes;
+
 		/*
 		 * This is going to setup the pages array with the number of
 		 * pages we want, so we don't really need to worry about the

@@ -1375,11 +1454,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		ret = prepare_pages(root, file, pages, num_pages,
 				    pos, first_index, write_bytes,
 				    force_page_uptodate);
-		if (ret) {
-			btrfs_delalloc_release_space(inode,
-					num_pages << PAGE_CACHE_SHIFT);
+		if (ret)
 			break;
-		}

 		copied = btrfs_copy_from_user(pos, num_pages,
 					      write_bytes, pages, i);

@@ -1409,30 +1485,46 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		 * managed to copy.
 		 */
 		if (num_pages > dirty_pages) {
+			release_bytes = (num_pages - dirty_pages) <<
+				PAGE_CACHE_SHIFT;
 			if (copied > 0) {
 				spin_lock(&BTRFS_I(inode)->lock);
 				BTRFS_I(inode)->outstanding_extents++;
 				spin_unlock(&BTRFS_I(inode)->lock);
 			}
-			btrfs_delalloc_release_space(inode,
-					(num_pages - dirty_pages) <<
-					PAGE_CACHE_SHIFT);
+			if (only_release_metadata)
+				btrfs_delalloc_release_metadata(inode,
+								release_bytes);
+			else
+				btrfs_delalloc_release_space(inode,
+							     release_bytes);
 		}

+		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
 		if (copied > 0) {
 			ret = btrfs_dirty_pages(root, inode, pages,
 						dirty_pages, pos, copied,
 						NULL);
 			if (ret) {
-				btrfs_delalloc_release_space(inode,
-					dirty_pages << PAGE_CACHE_SHIFT);
 				btrfs_drop_pages(pages, num_pages);
 				break;
 			}
 		}

+		release_bytes = 0;
 		btrfs_drop_pages(pages, num_pages);

+		if (only_release_metadata && copied > 0) {
+			u64 lockstart = round_down(pos, root->sectorsize);
+			u64 lockend = lockstart +
+				(dirty_pages << PAGE_CACHE_SHIFT) - 1;
+
+			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+				       lockend, EXTENT_NORESERVE, NULL,
+				       NULL, GFP_NOFS);
+			only_release_metadata = false;
+		}
+
 		cond_resched();

 		balance_dirty_pages_ratelimited(inode->i_mapping);

@@ -1445,6 +1537,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,

 	kfree(pages);

+	if (release_bytes) {
+		if (only_release_metadata)
+			btrfs_delalloc_release_metadata(inode, release_bytes);
+		else
+			btrfs_delalloc_release_space(inode, release_bytes);
+	}
+
 	return num_written ? num_written : ret;
 }
@@ -2175,12 +2274,6 @@ static long btrfs_fallocate(struct file *file, int mode,
 		goto out_reserve_fail;
 	}

-	/*
-	 * wait for ordered IO before we have any locks.  We'll loop again
-	 * below with the locks held.
-	 */
-	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
-
 	mutex_lock(&inode->i_mutex);
 	ret = inode_newsize_ok(inode, alloc_end);
 	if (ret)

@@ -2191,8 +2284,23 @@ static long btrfs_fallocate(struct file *file, int mode,
 					alloc_start);
 		if (ret)
 			goto out;
+	} else {
+		/*
+		 * If we are fallocating from the end of the file onward we
+		 * need to zero out the end of the page if i_size lands in the
+		 * middle of a page.
+		 */
+		ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
+		if (ret)
+			goto out;
 	}

+	/*
+	 * wait for ordered IO before we have any locks.  We'll loop again
+	 * below with the locks held.
+	 */
+	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
+
 	locked_end = alloc_end - 1;
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
fs/btrfs/free-space-cache.c

@@ -213,7 +213,7 @@ int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
 	else
 		ret = 0;
 	spin_unlock(&rsv->lock);
-	return 0;
+	return ret;
 }

 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
@@ -3150,6 +3150,8 @@ again:
 	return 0;
 }

+#define test_msg(fmt, ...) printk(KERN_INFO "btrfs: selftest: " fmt, ##__VA_ARGS__)
+
 /*
  * This test just does basic sanity checking, making sure we can add an extent
  * entry and remove space from either end and the middle, and make sure we can
@@ -3159,63 +3161,63 @@ static int test_extents(struct btrfs_block_group_cache *cache)
 {
 	int ret = 0;

-	printk(KERN_ERR "Running extent only tests\n");
+	test_msg("Running extent only tests\n");

 	/* First just make sure we can remove an entire entry */
 	ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Error adding initial extents %d\n", ret);
+		test_msg("Error adding initial extents %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Error removing extent %d\n", ret);
+		test_msg("Error removing extent %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, 0, 4 * 1024 * 1024)) {
-		printk(KERN_ERR "Full remove left some lingering space\n");
+		test_msg("Full remove left some lingering space\n");
 		return -1;
 	}

 	/* Ok edge and middle cases now */
 	ret = btrfs_add_free_space(cache, 0, 4 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Error adding half extent %d\n", ret);
+		test_msg("Error adding half extent %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 1 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Error removing tail end %d\n", ret);
+		test_msg("Error removing tail end %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Error removing front end %d\n", ret);
+		test_msg("Error removing front end %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 2 * 1024 * 1024, 4096);
 	if (ret) {
-		printk(KERN_ERR "Error removing middle piece %d\n", ret);
+		test_msg("Error removing middle piece %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, 0, 1 * 1024 * 1024)) {
-		printk(KERN_ERR "Still have space at the front\n");
+		test_msg("Still have space at the front\n");
 		return -1;
 	}

 	if (check_exists(cache, 2 * 1024 * 1024, 4096)) {
-		printk(KERN_ERR "Still have space in the middle\n");
+		test_msg("Still have space in the middle\n");
 		return -1;
 	}

 	if (check_exists(cache, 3 * 1024 * 1024, 1 * 1024 * 1024)) {
-		printk(KERN_ERR "Still have space at the end\n");
+		test_msg("Still have space at the end\n");
 		return -1;
 	}

@@ -3230,34 +3232,34 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
 	u64 next_bitmap_offset;
 	int ret;

-	printk(KERN_ERR "Running bitmap only tests\n");
+	test_msg("Running bitmap only tests\n");

 	ret = add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
 	if (ret) {
-		printk(KERN_ERR "Couldn't create a bitmap entry %d\n", ret);
+		test_msg("Couldn't create a bitmap entry %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 0, 4 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Error removing bitmap full range %d\n", ret);
+		test_msg("Error removing bitmap full range %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, 0, 4 * 1024 * 1024)) {
-		printk(KERN_ERR "Left some space in bitmap\n");
+		test_msg("Left some space in bitmap\n");
 		return -1;
 	}

 	ret = add_free_space_entry(cache, 0, 4 * 1024 * 1024, 1);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add to our bitmap entry %d\n", ret);
+		test_msg("Couldn't add to our bitmap entry %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 2 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Couldn't remove middle chunk %d\n", ret);
+		test_msg("Couldn't remove middle chunk %d\n", ret);
 		return ret;
 	}

@@ -3271,21 +3273,21 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache)
 	ret = add_free_space_entry(cache, next_bitmap_offset -
 				   (2 * 1024 * 1024), 4 * 1024 * 1024, 1);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add space that straddles two bitmaps"
-		       " %d\n", ret);
+		test_msg("Couldn't add space that straddles two bitmaps %d\n",
+			 ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, next_bitmap_offset -
 				      (1 * 1024 * 1024), 2 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Couldn't remove overlapping space %d\n", ret);
+		test_msg("Couldn't remove overlapping space %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, next_bitmap_offset - (1 * 1024 * 1024),
 			 2 * 1024 * 1024)) {
-		printk(KERN_ERR "Left some space when removing overlapping\n");
+		test_msg("Left some space when removing overlapping\n");
 		return -1;
 	}

@@ -3300,7 +3302,7 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096);
 	int ret;

-	printk(KERN_ERR "Running bitmap and extent tests\n");
+	test_msg("Running bitmap and extent tests\n");

 	/*
 	 * First let's do something simple, an extent at the same offset as the

@@ -3309,42 +3311,42 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	 */
 	ret = add_free_space_entry(cache, 4 * 1024 * 1024, 1 * 1024 * 1024, 1);
 	if (ret) {
-		printk(KERN_ERR "Couldn't create bitmap entry %d\n", ret);
+		test_msg("Couldn't create bitmap entry %d\n", ret);
 		return ret;
 	}

 	ret = add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+		test_msg("Couldn't add extent entry %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 0, 1 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Couldn't remove extent entry %d\n", ret);
+		test_msg("Couldn't remove extent entry %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, 0, 1 * 1024 * 1024)) {
-		printk(KERN_ERR "Left remnants after our remove\n");
+		test_msg("Left remnants after our remove\n");
 		return -1;
 	}

 	/* Now to add back the extent entry and remove from the bitmap */
 	ret = add_free_space_entry(cache, 0, 1 * 1024 * 1024, 0);
 	if (ret) {
-		printk(KERN_ERR "Couldn't re-add extent entry %d\n", ret);
+		test_msg("Couldn't re-add extent entry %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 4 * 1024 * 1024, 1 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Couldn't remove from bitmap %d\n", ret);
+		test_msg("Couldn't remove from bitmap %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, 4 * 1024 * 1024, 1 * 1024 * 1024)) {
-		printk(KERN_ERR "Left remnants in the bitmap\n");
+		test_msg("Left remnants in the bitmap\n");
 		return -1;
 	}

@@ -3354,19 +3356,18 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	 */
 	ret = add_free_space_entry(cache, 1 * 1024 * 1024, 4 * 1024 * 1024, 1);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add to a bitmap %d\n", ret);
+		test_msg("Couldn't add to a bitmap %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 512 * 1024, 3 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Couldn't remove overlapping space %d\n", ret);
+		test_msg("Couldn't remove overlapping space %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, 512 * 1024, 3 * 1024 * 1024)) {
-		printk(KERN_ERR "Left over pieces after removing "
-		       "overlapping\n");
+		test_msg("Left over pieces after removing overlapping\n");
 		return -1;
 	}

@@ -3375,24 +3376,24 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	/* Now with the extent entry offset into the bitmap */
 	ret = add_free_space_entry(cache, 4 * 1024 * 1024, 4 * 1024 * 1024, 1);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add space to the bitmap %d\n", ret);
+		test_msg("Couldn't add space to the bitmap %d\n", ret);
 		return ret;
 	}

 	ret = add_free_space_entry(cache, 2 * 1024 * 1024, 2 * 1024 * 1024, 0);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add extent to the cache %d\n", ret);
+		test_msg("Couldn't add extent to the cache %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 3 * 1024 * 1024, 4 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Problem removing overlapping space %d\n", ret);
+		test_msg("Problem removing overlapping space %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, 3 * 1024 * 1024, 4 * 1024 * 1024)) {
-		printk(KERN_ERR "Left something behind when removing space");
+		test_msg("Left something behind when removing space");
 		return -1;
 	}

@@ -3410,27 +3411,27 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	ret = add_free_space_entry(cache, bitmap_offset + 4 * 1024 * 1024,
 				   4 * 1024 * 1024, 1);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add bitmap %d\n", ret);
+		test_msg("Couldn't add bitmap %d\n", ret);
 		return ret;
 	}

 	ret = add_free_space_entry(cache, bitmap_offset - 1 * 1024 * 1024,
 				   5 * 1024 * 1024, 0);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+		test_msg("Couldn't add extent entry %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, bitmap_offset + 1 * 1024 * 1024,
 				      5 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Failed to free our space %d\n", ret);
+		test_msg("Failed to free our space %d\n", ret);
 		return ret;
 	}

 	if (check_exists(cache, bitmap_offset + 1 * 1024 * 1024,
 			 5 * 1024 * 1024)) {
-		printk(KERN_ERR "Left stuff over\n");
+		test_msg("Left stuff over\n");
 		return -1;
 	}

@@ -3444,20 +3445,19 @@ static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache)
 	 */
 	ret = add_free_space_entry(cache, 1 * 1024 * 1024, 2 * 1024 * 1024, 1);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add bitmap entry %d\n", ret);
+		test_msg("Couldn't add bitmap entry %d\n", ret);
 		return ret;
 	}

 	ret = add_free_space_entry(cache, 3 * 1024 * 1024, 1 * 1024 * 1024, 0);
 	if (ret) {
-		printk(KERN_ERR "Couldn't add extent entry %d\n", ret);
+		test_msg("Couldn't add extent entry %d\n", ret);
 		return ret;
 	}

 	ret = btrfs_remove_free_space(cache, 1 * 1024 * 1024, 3 * 1024 * 1024);
 	if (ret) {
-		printk(KERN_ERR "Error removing bitmap and extent "
-		       "overlapping %d\n", ret);
+		test_msg("Error removing bitmap and extent overlapping %d\n", ret);
 		return ret;
 	}

@@ -3469,11 +3469,11 @@ void btrfs_test_free_space_cache(void)
 {
 	struct btrfs_block_group_cache *cache;

-	printk(KERN_ERR "Running btrfs free space cache tests\n");
+	test_msg("Running btrfs free space cache tests\n");

 	cache = init_test_block_group();
 	if (!cache) {
-		printk(KERN_ERR "Couldn't run the tests\n");
+		test_msg("Couldn't run the tests\n");
 		return;
 	}

@@ -3487,6 +3487,9 @@ out:
 	__btrfs_remove_free_space_cache(cache->free_space_ctl);
 	kfree(cache->free_space_ctl);
 	kfree(cache);
-	printk(KERN_ERR "Free space cache tests finished\n");
+	test_msg("Free space cache tests finished\n");
|
||||
}
|
||||
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
|
||||
#undef test_msg
|
||||
#else /* !CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
|
||||
void btrfs_test_free_space_cache(void) {}
|
||||
#endif /* !CONFIG_BTRFS_FS_RUN_SANITY_TESTS */
|
||||
|
|
|
@@ -113,8 +113,6 @@ int btrfs_return_cluster_to_free_space(
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
u64 *trimmed, u64 start, u64 end, u64 minlen);

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_free_space_cache(void);
#endif

#endif

501 fs/btrfs/inode.c
@@ -42,6 +42,7 @@
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"

@@ -57,6 +58,7 @@
#include "free-space-cache.h"
#include "inode-map.h"
#include "backref.h"
#include "hash.h"

struct btrfs_iget_args {
u64 ino;

@@ -701,8 +703,12 @@ retry:
async_extent->nr_pages = 0;
async_extent->pages = NULL;

if (ret == -ENOSPC)
if (ret == -ENOSPC) {
unlock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
goto retry;
}
goto out_free;
}

@@ -1529,6 +1535,46 @@ static void btrfs_merge_extent_hook(struct inode *inode,
spin_unlock(&BTRFS_I(inode)->lock);
}

static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
struct inode *inode)
{
spin_lock(&root->delalloc_lock);
if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
&root->delalloc_inodes);
set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags);
root->nr_delalloc_inodes++;
if (root->nr_delalloc_inodes == 1) {
spin_lock(&root->fs_info->delalloc_root_lock);
BUG_ON(!list_empty(&root->delalloc_root));
list_add_tail(&root->delalloc_root,
&root->fs_info->delalloc_roots);
spin_unlock(&root->fs_info->delalloc_root_lock);
}
}
spin_unlock(&root->delalloc_lock);
}

static void btrfs_del_delalloc_inode(struct btrfs_root *root,
struct inode *inode)
{
spin_lock(&root->delalloc_lock);
if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_del_init(&BTRFS_I(inode)->delalloc_inodes);
clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags);
root->nr_delalloc_inodes--;
if (!root->nr_delalloc_inodes) {
spin_lock(&root->fs_info->delalloc_root_lock);
BUG_ON(list_empty(&root->delalloc_root));
list_del_init(&root->delalloc_root);
spin_unlock(&root->fs_info->delalloc_root_lock);
}
}
spin_unlock(&root->delalloc_lock);
}

/*
* extent_io.c set_bit_hook, used to track delayed allocation
* bytes in this file, and to maintain the list of inodes that

@@ -1561,16 +1607,8 @@ static void btrfs_set_bit_hook(struct inode *inode,
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes += len;
if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags)) {
spin_lock(&root->fs_info->delalloc_lock);
if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
&root->fs_info->delalloc_inodes);
set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags);
}
spin_unlock(&root->fs_info->delalloc_lock);
}
&BTRFS_I(inode)->runtime_flags))
btrfs_add_delalloc_inodes(root, inode);
spin_unlock(&BTRFS_I(inode)->lock);
}
}

@@ -1604,7 +1642,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
btrfs_delalloc_release_metadata(inode, len);

if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
&& do_list)
&& do_list && !(state->state & EXTENT_NORESERVE))
btrfs_free_reserved_data_space(inode, len);

__percpu_counter_add(&root->fs_info->delalloc_bytes, -len,

@@ -1613,15 +1651,8 @@ static void btrfs_clear_bit_hook(struct inode *inode,
BTRFS_I(inode)->delalloc_bytes -= len;
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags)) {
spin_lock(&root->fs_info->delalloc_lock);
if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
list_del_init(&BTRFS_I(inode)->delalloc_inodes);
clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&BTRFS_I(inode)->runtime_flags);
}
spin_unlock(&root->fs_info->delalloc_lock);
}
&BTRFS_I(inode)->runtime_flags))
btrfs_del_delalloc_inode(root, inode);
spin_unlock(&BTRFS_I(inode)->lock);
}
}

@@ -2263,11 +2294,6 @@ static noinline int relink_extent_backref(struct btrfs_path *path,
return 0;
return PTR_ERR(root);
}
if (btrfs_root_refs(&root->root_item) == 0) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
/* parse ENOENT to 0 */
return 0;
}

/* step 2: get inode */
key.objectid = backref->inum;

@@ -3215,13 +3241,16 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
/* 1 for the orphan item deletion. */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
iput(inode);
ret = PTR_ERR(trans);
goto out;
}
ret = btrfs_orphan_add(trans, inode);
btrfs_end_transaction(trans, root);
if (ret)
if (ret) {
iput(inode);
goto out;
}

ret = btrfs_truncate(inode);
if (ret)

@@ -3274,8 +3303,17 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
{
u32 nritems = btrfs_header_nritems(leaf);
struct btrfs_key found_key;
static u64 xattr_access = 0;
static u64 xattr_default = 0;
int scanned = 0;

if (!xattr_access) {
xattr_access = btrfs_name_hash(POSIX_ACL_XATTR_ACCESS,
strlen(POSIX_ACL_XATTR_ACCESS));
xattr_default = btrfs_name_hash(POSIX_ACL_XATTR_DEFAULT,
strlen(POSIX_ACL_XATTR_DEFAULT));
}

slot++;
while (slot < nritems) {
btrfs_item_key_to_cpu(leaf, &found_key, slot);

@@ -3285,8 +3323,11 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
return 0;

/* we found an xattr, assume we've got an acl */
if (found_key.type == BTRFS_XATTR_ITEM_KEY)
return 1;
if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
if (found_key.offset == xattr_access ||
found_key.offset == xattr_default)
return 1;
}

/*
* we found a key greater than an xattr key, there can't

@@ -3660,53 +3701,20 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
}
return ret;
}

/* helper to check if there is any shared block in the path */
static int check_path_shared(struct btrfs_root *root,
struct btrfs_path *path)
{
struct extent_buffer *eb;
int level;
u64 refs = 1;

for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
int ret;

if (!path->nodes[level])
break;
eb = path->nodes[level];
if (!btrfs_block_can_be_shared(root, eb))
continue;
ret = btrfs_lookup_extent_info(NULL, root, eb->start, level, 1,
&refs, NULL);
if (refs > 1)
return 1;
}
return 0;
}

/*
* helper to start transaction for unlink and rmdir.
*
* unlink and rmdir are special in btrfs, they do not always free space.
* so in enospc case, we should make sure they will free space before
* allowing them to use the global metadata reservation.
* unlink and rmdir are special in btrfs, they do not always free space, so
* if we cannot make our reservations the normal way try and see if there is
* plenty of slack room in the global reserve to migrate, otherwise we cannot
* allow the unlink to occur.
*/
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
struct dentry *dentry)
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct btrfs_dir_item *di;
struct inode *inode = dentry->d_inode;
u64 index;
int check_link = 1;
int err = -ENOSPC;
int ret;
u64 ino = btrfs_ino(inode);
u64 dir_ino = btrfs_ino(dir);

/*
* 1 for the possible orphan item

@@ -3719,158 +3727,23 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
return trans;

if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
return ERR_PTR(-ENOSPC);
if (PTR_ERR(trans) == -ENOSPC) {
u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);

/* check if there is someone else holds reference */
if (S_ISDIR(inode->i_mode) && atomic_read(&inode->i_count) > 1)
return ERR_PTR(-ENOSPC);

if (atomic_read(&inode->i_count) > 2)
return ERR_PTR(-ENOSPC);

if (xchg(&root->fs_info->enospc_unlink, 1))
return ERR_PTR(-ENOSPC);

path = btrfs_alloc_path();
if (!path) {
root->fs_info->enospc_unlink = 0;
return ERR_PTR(-ENOMEM);
}

/* 1 for the orphan item */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
btrfs_free_path(path);
root->fs_info->enospc_unlink = 0;
return trans;
}

path->skip_locking = 1;
path->search_commit_root = 1;

ret = btrfs_lookup_inode(trans, root, path,
&BTRFS_I(dir)->location, 0);
if (ret < 0) {
err = ret;
goto out;
}
if (ret == 0) {
if (check_path_shared(root, path))
goto out;
} else {
check_link = 0;
}
btrfs_release_path(path);

ret = btrfs_lookup_inode(trans, root, path,
&BTRFS_I(inode)->location, 0);
if (ret < 0) {
err = ret;
goto out;
}
if (ret == 0) {
if (check_path_shared(root, path))
goto out;
} else {
check_link = 0;
}
btrfs_release_path(path);

if (ret == 0 && S_ISREG(inode->i_mode)) {
ret = btrfs_lookup_file_extent(trans, root, path,
ino, (u64)-1, 0);
if (ret < 0) {
err = ret;
goto out;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return trans;
ret = btrfs_cond_migrate_bytes(root->fs_info,
&root->fs_info->trans_block_rsv,
num_bytes, 5);
if (ret) {
btrfs_end_transaction(trans, root);
return ERR_PTR(ret);
}
BUG_ON(ret == 0); /* Corruption */
if (check_path_shared(root, path))
goto out;
btrfs_release_path(path);
}

if (!check_link) {
err = 0;
goto out;
}

di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
goto out;
}
if (di) {
if (check_path_shared(root, path))
goto out;
} else {
err = 0;
goto out;
}
btrfs_release_path(path);

ret = btrfs_get_inode_ref_index(trans, root, path, dentry->d_name.name,
dentry->d_name.len, ino, dir_ino, 0,
&index);
if (ret) {
err = ret;
goto out;
}

if (check_path_shared(root, path))
goto out;

btrfs_release_path(path);

/*
* This is a commit root search, if we can lookup inode item and other
* relative items in the commit root, it means the transaction of
* dir/file creation has been committed, and the dir index item that we
* delay to insert has also been inserted into the commit root. So
* we needn't worry about the delayed insertion of the dir index item
* here.
*/
di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
dentry->d_name.name, dentry->d_name.len, 0);
if (IS_ERR(di)) {
err = PTR_ERR(di);
goto out;
}
BUG_ON(ret == -ENOENT);
if (check_path_shared(root, path))
goto out;

err = 0;
out:
btrfs_free_path(path);
/* Migrate the orphan reservation over */
if (!err)
err = btrfs_block_rsv_migrate(trans->block_rsv,
&root->fs_info->global_block_rsv,
trans->bytes_reserved);

if (err) {
btrfs_end_transaction(trans, root);
root->fs_info->enospc_unlink = 0;
return ERR_PTR(err);
}

trans->block_rsv = &root->fs_info->global_block_rsv;
return trans;
}

static void __unlink_end_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
if (trans->block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL) {
btrfs_block_rsv_release(root, trans->block_rsv,
trans->bytes_reserved);
trans->block_rsv = &root->fs_info->trans_block_rsv;
BUG_ON(!root->fs_info->enospc_unlink);
root->fs_info->enospc_unlink = 0;
trans->bytes_reserved = num_bytes;
}
btrfs_end_transaction(trans, root);
return trans;
}

static int btrfs_unlink(struct inode *dir, struct dentry *dentry)

@@ -3880,7 +3753,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
int ret;

trans = __unlink_start_trans(dir, dentry);
trans = __unlink_start_trans(dir);
if (IS_ERR(trans))
return PTR_ERR(trans);

@@ -3898,7 +3771,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
}

out:
__unlink_end_trans(trans, root);
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
return ret;
}

@@ -3995,7 +3868,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
return -EPERM;

trans = __unlink_start_trans(dir, dentry);
trans = __unlink_start_trans(dir);
if (IS_ERR(trans))
return PTR_ERR(trans);

@@ -4017,7 +3890,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
if (!err)
btrfs_i_size_write(inode, 0);
out:
__unlink_end_trans(trans, root);
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);

return err;

@@ -4395,6 +4268,15 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
u64 hole_size;
int err = 0;

/*
* If our size started in the middle of a page we need to zero out the
* rest of the page before we expand the i_size, otherwise we could
* expose stale data.
*/
err = btrfs_truncate_page(inode, oldsize, 0, 0);
if (err)
return err;

if (size <= hole_start)
return 0;

@@ -4822,11 +4704,6 @@ static int fixup_tree_root_location(struct btrfs_root *root,
goto out;
}

if (btrfs_root_refs(&new_root->root_item) == 0) {
err = -ENOENT;
goto out;
}

*sub_root = new_root;
location->objectid = btrfs_root_dirid(&new_root->root_item);
location->type = BTRFS_INODE_ITEM_KEY;

@@ -5092,8 +4969,10 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
if (!(inode->i_sb->s_flags & MS_RDONLY))
ret = btrfs_orphan_cleanup(sub_root);
up_read(&root->fs_info->cleanup_work_sem);
if (ret)
if (ret) {
iput(inode);
inode = ERR_PTR(ret);
}
}

return inode;

@@ -6501,10 +6380,10 @@ out:
* returns 1 when the nocow is safe, < 1 on error, 0 if the
* block must be cow'd
*/
static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
struct inode *inode, u64 offset, u64 *len,
u64 *orig_start, u64 *orig_block_len,
u64 *ram_bytes)
noinline int can_nocow_extent(struct btrfs_trans_handle *trans,
struct inode *inode, u64 offset, u64 *len,
u64 *orig_start, u64 *orig_block_len,
u64 *ram_bytes)
{
struct btrfs_path *path;
int ret;

@@ -6518,7 +6397,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
u64 num_bytes;
int slot;
int found_type;
bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);

path = btrfs_alloc_path();
if (!path)
return -ENOMEM;

@@ -6558,18 +6437,28 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
/* not a regular extent, must cow */
goto out;
}

if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
goto out;

disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
if (disk_bytenr == 0)
goto out;

if (btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
goto out;

backref_offset = btrfs_file_extent_offset(leaf, fi);

*orig_start = key.offset - backref_offset;
*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
if (orig_start) {
*orig_start = key.offset - backref_offset;
*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
}

extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
if (extent_end < offset + *len) {
/* extent doesn't include our full range, must cow */
goto out;
}

if (btrfs_extent_readonly(root, disk_bytenr))
goto out;

@@ -6813,8 +6702,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (IS_ERR(trans))
goto must_cow;

if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
&orig_block_len, &ram_bytes) == 1) {
if (can_nocow_extent(trans, inode, start, &len, &orig_start,
&orig_block_len, &ram_bytes) == 1) {
if (type == BTRFS_ORDERED_PREALLOC) {
free_extent_map(em);
em = create_pinned_em(inode, start, len,

@@ -7243,7 +7132,6 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_dio_private *dip;
struct bio_vec *bvec = dio_bio->bi_io_vec;
struct bio *io_bio;
int skip_sum;
int write = rw & REQ_WRITE;

@@ -7265,16 +7153,9 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
}

dip->private = dio_bio->bi_private;
io_bio->bi_private = dio_bio->bi_private;
dip->inode = inode;
dip->logical_offset = file_offset;

dip->bytes = 0;
do {
dip->bytes += bvec->bv_len;
bvec++;
} while (bvec <= (dio_bio->bi_io_vec + dio_bio->bi_vcnt - 1));

dip->bytes = dio_bio->bi_size;
dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
io_bio->bi_private = dip;
dip->errors = 0;

@@ -7373,8 +7254,16 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
atomic_inc(&inode->i_dio_count);
smp_mb__after_atomic_inc();

/*
* The generic stuff only does filemap_write_and_wait_range, which isn't
* enough if we've written compressed pages to this area, so we need to
* call btrfs_wait_ordered_range to make absolutely sure that any
* outstanding dirty pages are on disk.
*/
count = iov_length(iov, nr_segs);
btrfs_wait_ordered_range(inode, offset, count);

if (rw & WRITE) {
count = iov_length(iov, nr_segs);
/*
* If the write DIO is beyond the EOF, we need update
* the isize, but it is protected by i_mutex. So we can

@@ -7694,16 +7583,12 @@ static int btrfs_truncate(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv;
int ret;
int ret = 0;
int err = 0;
struct btrfs_trans_handle *trans;
u64 mask = root->sectorsize - 1;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);

ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
if (ret)
return ret;

btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
btrfs_ordered_update_i_size(inode, inode->i_size, NULL);

@@ -7961,9 +7846,9 @@ void btrfs_destroy_inode(struct inode *inode)
*/
smp_mb();
if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
spin_lock(&root->fs_info->ordered_extent_lock);
spin_lock(&root->fs_info->ordered_root_lock);
list_del_init(&BTRFS_I(inode)->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
spin_unlock(&root->fs_info->ordered_root_lock);
}

if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,

@@ -8333,7 +8218,7 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
struct btrfs_inode *binode;
struct inode *inode;

@@ -8342,30 +8227,23 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
struct list_head splice;
int ret = 0;

if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;

INIT_LIST_HEAD(&works);
INIT_LIST_HEAD(&splice);

spin_lock(&root->fs_info->delalloc_lock);
list_splice_init(&root->fs_info->delalloc_inodes, &splice);
spin_lock(&root->delalloc_lock);
list_splice_init(&root->delalloc_inodes, &splice);
while (!list_empty(&splice)) {
binode = list_entry(splice.next, struct btrfs_inode,
delalloc_inodes);

list_del_init(&binode->delalloc_inodes);

list_move_tail(&binode->delalloc_inodes,
&root->delalloc_inodes);
inode = igrab(&binode->vfs_inode);
if (!inode) {
clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
&binode->runtime_flags);
cond_resched_lock(&root->delalloc_lock);
continue;
}

list_add_tail(&binode->delalloc_inodes,
&root->fs_info->delalloc_inodes);
spin_unlock(&root->fs_info->delalloc_lock);
spin_unlock(&root->delalloc_lock);

work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
if (unlikely(!work)) {

@@ -8377,16 +8255,39 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
&work->work);

cond_resched();
spin_lock(&root->fs_info->delalloc_lock);
spin_lock(&root->delalloc_lock);
}
spin_unlock(&root->fs_info->delalloc_lock);
spin_unlock(&root->delalloc_lock);

list_for_each_entry_safe(work, next, &works, list) {
list_del_init(&work->list);
btrfs_wait_and_free_delalloc_work(work);
}
return 0;
out:
list_for_each_entry_safe(work, next, &works, list) {
list_del_init(&work->list);
btrfs_wait_and_free_delalloc_work(work);
}

/* the filemap_flush will queue IO into the worker threads, but
if (!list_empty_careful(&splice)) {
spin_lock(&root->delalloc_lock);
list_splice_tail(&splice, &root->delalloc_inodes);
spin_unlock(&root->delalloc_lock);
}
return ret;
}

int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
int ret;

if (root->fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;

ret = __start_delalloc_inodes(root, delay_iput);
/*
* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
*/

@@ -8398,17 +8299,55 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
atomic_read(&root->fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&root->fs_info->async_submit_draining);
return ret;
}

int btrfs_start_all_delalloc_inodes(struct btrfs_fs_info *fs_info,
int delay_iput)
{
struct btrfs_root *root;
struct list_head splice;
int ret;

if (fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;

INIT_LIST_HEAD(&splice);

spin_lock(&fs_info->delalloc_root_lock);
list_splice_init(&fs_info->delalloc_roots, &splice);
while (!list_empty(&splice)) {
root = list_first_entry(&splice, struct btrfs_root,
delalloc_root);
root = btrfs_grab_fs_root(root);
BUG_ON(!root);
list_move_tail(&root->delalloc_root,
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);

ret = __start_delalloc_inodes(root, delay_iput);
btrfs_put_fs_root(root);
if (ret)
goto out;

spin_lock(&fs_info->delalloc_root_lock);
}
spin_unlock(&fs_info->delalloc_root_lock);

atomic_inc(&fs_info->async_submit_draining);
while (atomic_read(&fs_info->nr_async_submits) ||
atomic_read(&fs_info->async_delalloc_pages)) {
wait_event(fs_info->async_submit_wait,
(atomic_read(&fs_info->nr_async_submits) == 0 &&
atomic_read(&fs_info->async_delalloc_pages) == 0));
}
atomic_dec(&fs_info->async_submit_draining);
return 0;
out:
list_for_each_entry_safe(work, next, &works, list) {
list_del_init(&work->list);
btrfs_wait_and_free_delalloc_work(work);
}

if (!list_empty_careful(&splice)) {
spin_lock(&root->fs_info->delalloc_lock);
list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
spin_unlock(&root->fs_info->delalloc_lock);
spin_lock(&fs_info->delalloc_root_lock);
list_splice_tail(&splice, &fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
}
return ret;
}

@@ -555,6 +555,12 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
if (!root->ref_cows)
return -EINVAL;

ret = btrfs_start_delalloc_inodes(root, 0);
if (ret)
return ret;

btrfs_wait_ordered_extents(root, 0);

pending_snapshot = kzalloc(sizeof(*pending_snapshot), GFP_NOFS);
if (!pending_snapshot)
return -ENOMEM;

@@ -2354,14 +2360,6 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
if (ret)
return ret;

if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1)) {
pr_info("btrfs: dev add/delete/balance/replace/resize operation in progress\n");
mnt_drop_write_file(file);
return -EINVAL;
}

mutex_lock(&root->fs_info->volume_mutex);
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args)) {
ret = PTR_ERR(vol_args);

@@ -2369,12 +2367,20 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
}

vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
ret = btrfs_rm_device(root, vol_args->name);

kfree(vol_args);
out:
if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running,
1)) {
ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS;
goto out;
}

mutex_lock(&root->fs_info->volume_mutex);
ret = btrfs_rm_device(root, vol_args->name);
mutex_unlock(&root->fs_info->volume_mutex);
atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0);

out:
kfree(vol_args);
mnt_drop_write_file(file);
return ret;
}

@@ -2480,6 +2486,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
int ret;
u64 len = olen;
u64 bs = root->fs_info->sb->s_blocksize;
int same_inode = 0;

/*
* TODO:

@@ -2516,7 +2523,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,

ret = -EINVAL;
if (src == inode)
goto out_fput;
same_inode = 1;

/* the src must be open for reading */
if (!(src_file.file->f_mode & FMODE_READ))

@@ -2547,12 +2554,16 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
}
path->reada = 2;

if (inode < src) {
mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&src->i_mutex, I_MUTEX_CHILD);
if (!same_inode) {
if (inode < src) {
mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&src->i_mutex, I_MUTEX_CHILD);
} else {
mutex_lock_nested(&src->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
}
} else {
mutex_lock_nested(&src->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
mutex_lock(&src->i_mutex);
}

/* determine range to clone */

@@ -2570,6 +2581,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
!IS_ALIGNED(destoff, bs))
goto out_unlock;

/* verify if ranges are overlapped within the same file */
if (same_inode) {
if (destoff + len > off && destoff < off + len)
goto out_unlock;
}

if (destoff > inode->i_size) {
ret = btrfs_cont_expand(inode, inode->i_size, destoff);
if (ret)

@@ -2846,7 +2863,8 @@ out:
unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
out_unlock:
mutex_unlock(&src->i_mutex);
mutex_unlock(&inode->i_mutex);
if (!same_inode)
mutex_unlock(&inode->i_mutex);
vfree(buf);
btrfs_free_path(path);
out_fput:

@@ -2951,11 +2969,6 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
goto out;
}

if (btrfs_root_refs(&new_root->root_item) == 0) {
ret = -ENOENT;
goto out;
}

path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;

@@ -3719,9 +3732,6 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
break;
}

if (copy_to_user(arg, sa, sizeof(*sa)))
ret = -EFAULT;

err = btrfs_commit_transaction(trans, root->fs_info->tree_root);
if (err && !ret)
ret = err;

@@ -3937,6 +3947,16 @@ static long btrfs_ioctl_quota_rescan_status(struct file *file, void __user *arg)
return ret;
}

static long btrfs_ioctl_quota_rescan_wait(struct file *file, void __user *arg)
{
struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;

if (!capable(CAP_SYS_ADMIN))
return -EPERM;

return btrfs_qgroup_wait_for_completion(root->fs_info);
}

static long btrfs_ioctl_set_received_subvol(struct file *file,
void __user *arg)
{

@@ -4179,6 +4199,8 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_quota_rescan(file, argp);
case BTRFS_IOC_QUOTA_RESCAN_STATUS:
return btrfs_ioctl_quota_rescan_status(file, argp);
case BTRFS_IOC_QUOTA_RESCAN_WAIT:
return btrfs_ioctl_quota_rescan_wait(file, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(root, argp);
case BTRFS_IOC_GET_FSLABEL:

@@ -31,8 +31,8 @@

struct workspace {
void *mem;
void *buf; /* where compressed data goes */
void *cbuf; /* where decompressed data goes */
void *buf; /* where decompressed data goes */
void *cbuf; /* where compressed data goes */
struct list_head list;
};

@@ -24,6 +24,7 @@
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

@@ -184,6 +185,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len,
int type, int dio, int compress_type)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry;

@@ -227,10 +229,18 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
ordered_data_tree_panic(inode, -EEXIST, file_offset);
spin_unlock_irq(&tree->lock);

spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
spin_lock(&root->ordered_extent_lock);
list_add_tail(&entry->root_extent_list,
&BTRFS_I(inode)->root->fs_info->ordered_extents);
spin_unlock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
&root->ordered_extents);
root->nr_ordered_extents++;
if (root->nr_ordered_extents == 1) {
spin_lock(&root->fs_info->ordered_root_lock);
BUG_ON(!list_empty(&root->ordered_root));
list_add_tail(&root->ordered_root,
&root->fs_info->ordered_roots);
spin_unlock(&root->fs_info->ordered_root_lock);
}
spin_unlock(&root->ordered_extent_lock);

return 0;
}

@@ -516,8 +526,9 @@ void btrfs_remove_ordered_extent(struct inode *inode,
set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
spin_unlock_irq(&tree->lock);

spin_lock(&root->fs_info->ordered_extent_lock);
spin_lock(&root->ordered_extent_lock);
list_del_init(&entry->root_extent_list);
root->nr_ordered_extents--;

trace_btrfs_ordered_extent_remove(inode, entry);

@@ -530,7 +541,14 @@ void btrfs_remove_ordered_extent(struct inode *inode,
!mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
list_del_init(&BTRFS_I(inode)->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);

if (!root->nr_ordered_extents) {
spin_lock(&root->fs_info->ordered_root_lock);
BUG_ON(list_empty(&root->ordered_root));
list_del_init(&root->ordered_root);
spin_unlock(&root->fs_info->ordered_root_lock);
}
spin_unlock(&root->ordered_extent_lock);
wake_up(&entry->wait);
}

@@ -550,7 +568,6 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
{
struct list_head splice, works;
struct list_head *cur;
struct btrfs_ordered_extent *ordered, *next;
struct inode *inode;

@@ -558,35 +575,34 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
INIT_LIST_HEAD(&works);

mutex_lock(&root->fs_info->ordered_operations_mutex);
spin_lock(&root->fs_info->ordered_extent_lock);
list_splice_init(&root->fs_info->ordered_extents, &splice);
spin_lock(&root->ordered_extent_lock);
list_splice_init(&root->ordered_extents, &splice);
while (!list_empty(&splice)) {
cur = splice.next;
ordered = list_entry(cur, struct btrfs_ordered_extent,
root_extent_list);
list_del_init(&ordered->root_extent_list);
atomic_inc(&ordered->refs);

ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
root_extent_list);
list_move_tail(&ordered->root_extent_list,
&root->ordered_extents);
/*
* the inode may be getting freed (in sys_unlink path).
*/
inode = igrab(ordered->inode);

spin_unlock(&root->fs_info->ordered_extent_lock);

if (inode) {
ordered->flush_work.func = btrfs_run_ordered_extent_work;
list_add_tail(&ordered->work_list, &works);
btrfs_queue_worker(&root->fs_info->flush_workers,
&ordered->flush_work);
} else {
btrfs_put_ordered_extent(ordered);
if (!inode) {
cond_resched_lock(&root->ordered_extent_lock);
continue;
}

atomic_inc(&ordered->refs);
spin_unlock(&root->ordered_extent_lock);

ordered->flush_work.func = btrfs_run_ordered_extent_work;
list_add_tail(&ordered->work_list, &works);
btrfs_queue_worker(&root->fs_info->flush_workers,
&ordered->flush_work);

cond_resched();
spin_lock(&root->fs_info->ordered_extent_lock);
spin_lock(&root->ordered_extent_lock);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
spin_unlock(&root->ordered_extent_lock);

list_for_each_entry_safe(ordered, next, &works, work_list) {
list_del_init(&ordered->work_list);

@@ -604,6 +620,33 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
mutex_unlock(&root->fs_info->ordered_operations_mutex);
}

void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
int delay_iput)
{
struct btrfs_root *root;
struct list_head splice;

INIT_LIST_HEAD(&splice);

spin_lock(&fs_info->ordered_root_lock);
list_splice_init(&fs_info->ordered_roots, &splice);
while (!list_empty(&splice)) {
root = list_first_entry(&splice, struct btrfs_root,
ordered_root);
root = btrfs_grab_fs_root(root);
BUG_ON(!root);
list_move_tail(&root->ordered_root,
&fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);

btrfs_wait_ordered_extents(root, delay_iput);
btrfs_put_fs_root(root);

spin_lock(&fs_info->ordered_root_lock);
}
spin_unlock(&fs_info->ordered_root_lock);
}

/*
* this is used during transaction commit to write all the inodes
* added to the ordered operation list. These files must be fully on

@@ -629,7 +672,7 @@ int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
INIT_LIST_HEAD(&works);

mutex_lock(&root->fs_info->ordered_operations_mutex);
spin_lock(&root->fs_info->ordered_extent_lock);
spin_lock(&root->fs_info->ordered_root_lock);
list_splice_init(&cur_trans->ordered_operations, &splice);
while (!list_empty(&splice)) {
btrfs_inode = list_entry(splice.next, struct btrfs_inode,

@@ -648,17 +691,17 @@ int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
if (!wait)
list_add_tail(&BTRFS_I(inode)->ordered_operations,
&cur_trans->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
spin_unlock(&root->fs_info->ordered_root_lock);

work = btrfs_alloc_delalloc_work(inode, wait, 1);
if (!work) {
spin_lock(&root->fs_info->ordered_extent_lock);
spin_lock(&root->fs_info->ordered_root_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations))
list_add_tail(&btrfs_inode->ordered_operations,
&splice);
list_splice_tail(&splice,
&cur_trans->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
spin_unlock(&root->fs_info->ordered_root_lock);
ret = -ENOMEM;
goto out;
}

@@ -667,9 +710,9 @@ int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
&work->work);

cond_resched();
spin_lock(&root->fs_info->ordered_extent_lock);
spin_lock(&root->fs_info->ordered_root_lock);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
spin_unlock(&root->fs_info->ordered_root_lock);
out:
list_for_each_entry_safe(work, next, &works, list) {
list_del_init(&work->list);

@@ -989,7 +1032,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
u32 *sum, int len)
{
struct btrfs_ordered_sum *ordered_sum;
struct btrfs_sector_sum *sector_sums;
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
unsigned long num_sectors;

@@ -1007,18 +1049,16 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
disk_bytenr < ordered_sum->bytenr + ordered_sum->len) {
i = (disk_bytenr - ordered_sum->bytenr) >>
inode->i_sb->s_blocksize_bits;
sector_sums = ordered_sum->sums + i;
num_sectors = ordered_sum->len >>
inode->i_sb->s_blocksize_bits;
for (; i < num_sectors; i++) {
if (sector_sums[i].bytenr == disk_bytenr) {
sum[index] = sector_sums[i].sum;
index++;
if (index == len)
goto out;
disk_bytenr += sectorsize;
}
}
num_sectors = min_t(int, len - index, num_sectors - i);
memcpy(sum + index, ordered_sum->sums + i,
num_sectors);

index += (int)num_sectors;
if (index == len)
goto out;
disk_bytenr += num_sectors * sectorsize;
}
}
out:

@@ -1055,12 +1095,12 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
if (last_mod < root->fs_info->last_trans_committed)
return;

spin_lock(&root->fs_info->ordered_extent_lock);
spin_lock(&root->fs_info->ordered_root_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
list_add_tail(&BTRFS_I(inode)->ordered_operations,
&cur_trans->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
spin_unlock(&root->fs_info->ordered_root_lock);
}

int __init ordered_data_init(void)

@@ -26,18 +26,6 @@ struct btrfs_ordered_inode_tree {
struct rb_node *last;
};

/*
* these are used to collect checksums done just before bios submission.
* They are attached via a list into the ordered extent, and
* checksum items are inserted into the tree after all the blocks in
* the ordered extent are on disk
*/
struct btrfs_sector_sum {
/* bytenr on disk */
u64 bytenr;
u32 sum;
};

struct btrfs_ordered_sum {
/* bytenr is the start of this extent on disk */
u64 bytenr;

@@ -45,10 +33,10 @@ struct btrfs_ordered_sum {
/*
* this is the length in bytes covered by the sums array below.
*/
unsigned long len;
int len;
struct list_head list;
/* last field is a variable length array of btrfs_sector_sums */
struct btrfs_sector_sum sums[];
/* last field is a variable length array of csums */
u32 sums[];
};

/*

@@ -149,11 +137,8 @@ struct btrfs_ordered_extent {
static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
unsigned long bytes)
{
unsigned long num_sectors = (bytes + root->sectorsize - 1) /
root->sectorsize;
num_sectors++;
return sizeof(struct btrfs_ordered_sum) +
num_sectors * sizeof(struct btrfs_sector_sum);
int num_sectors = (int)DIV_ROUND_UP(bytes, root->sectorsize);
return sizeof(struct btrfs_ordered_sum) + num_sectors * sizeof(u32);
}

static inline void

@@ -204,6 +189,8 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
void btrfs_wait_all_ordered_extents(struct btrfs_fs_info *fs_info,
int delay_iput);
void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);

@ -98,13 +98,10 @@ struct btrfs_qgroup_list {
|
|||
struct btrfs_qgroup *member;
|
||||
};
|
||||
|
||||
struct qgroup_rescan {
|
||||
struct btrfs_work work;
|
||||
struct btrfs_fs_info *fs_info;
|
||||
};
|
||||
|
||||
static void qgroup_rescan_start(struct btrfs_fs_info *fs_info,
|
||||
struct qgroup_rescan *qscan);
|
||||
static int
|
||||
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
|
||||
int init_flags);
|
||||
static void qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info);
|
||||
|
||||
/* must be called with qgroup_ioctl_lock held */
|
||||
static struct btrfs_qgroup *find_qgroup_rb(struct btrfs_fs_info *fs_info,
|
||||
|
@ -255,10 +252,17 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
|
|||
int slot;
|
||||
int ret = 0;
|
||||
u64 flags = 0;
|
||||
u64 rescan_progress = 0;
|
||||
|
||||
if (!fs_info->quota_enabled)
|
||||
return 0;
|
||||
|
||||
fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
|
||||
if (!fs_info->qgroup_ulist) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
path = btrfs_alloc_path();
|
||||
if (!path) {
|
||||
ret = -ENOMEM;
|
||||
|
@ -306,20 +310,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info)
|
|||
}
|
||||
fs_info->qgroup_flags = btrfs_qgroup_status_flags(l,
|
||||
ptr);
|
||||
fs_info->qgroup_rescan_progress.objectid =
|
||||
btrfs_qgroup_status_rescan(l, ptr);
|
||||
if (fs_info->qgroup_flags &
|
||||
BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
|
||||
struct qgroup_rescan *qscan =
|
||||
kmalloc(sizeof(*qscan), GFP_NOFS);
|
||||
if (!qscan) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
fs_info->qgroup_rescan_progress.type = 0;
|
||||
fs_info->qgroup_rescan_progress.offset = 0;
|
||||
qgroup_rescan_start(fs_info, qscan);
|
||||
}
|
||||
rescan_progress = btrfs_qgroup_status_rescan(l, ptr);
|
||||
goto next1;
|
||||
}
|
||||
|
||||
|
@ -421,9 +412,18 @@ out:
|
|||
if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON)) {
|
||||
fs_info->quota_enabled = 0;
|
||||
fs_info->pending_quota_state = 0;
|
||||
} else if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN &&
|
||||
ret >= 0) {
|
||||
ret = qgroup_rescan_init(fs_info, rescan_progress, 0);
|
||||
}
|
||||
btrfs_free_path(path);
|
||||
|
||||
if (ret < 0) {
|
||||
ulist_free(fs_info->qgroup_ulist);
|
||||
fs_info->qgroup_ulist = NULL;
|
||||
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
|
||||
}
|
||||
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
|
@ -460,6 +460,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
|
|||
}
|
||||
kfree(qgroup);
|
||||
}
|
||||
ulist_free(fs_info->qgroup_ulist);
|
||||
}
|
||||
|
||||
static int add_qgroup_relation_item(struct btrfs_trans_handle *trans,
|
||||
|
@ -819,6 +820,12 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
|
|||
goto out;
|
||||
}
|
||||
|
||||
fs_info->qgroup_ulist = ulist_alloc(GFP_NOFS);
|
||||
if (!fs_info->qgroup_ulist) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* initially create the quota tree
|
||||
*/
|
||||
|
@ -916,6 +923,10 @@ out_free_root:
|
|||
kfree(quota_root);
|
||||
}
|
||||
out:
|
||||
if (ret) {
|
||||
ulist_free(fs_info->qgroup_ulist);
|
||||
fs_info->qgroup_ulist = NULL;
|
||||
}
|
||||
mutex_unlock(&fs_info->qgroup_ioctl_lock);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1355,7 +1366,6 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
|
|||
u64 ref_root;
|
||||
struct btrfs_qgroup *qgroup;
|
||||
struct ulist *roots = NULL;
|
||||
struct ulist *tmp = NULL;
|
||||
u64 seq;
|
||||
int ret = 0;
|
||||
int sgn;
|
||||
|
@ -1428,14 +1438,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
|
|||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
mutex_lock(&fs_info->qgroup_rescan_lock);
|
||||
spin_lock(&fs_info->qgroup_lock);
|
||||
if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) {
|
||||
if (fs_info->qgroup_rescan_progress.objectid <= node->bytenr) {
|
||||
ret = 0;
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
|
||||
quota_root = fs_info->quota_root;
|
||||
if (!quota_root)
|
||||
|
@ -1448,39 +1451,34 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
|
|||
/*
|
||||
* step 1: for each old ref, visit all nodes once and inc refcnt
|
||||
*/
|
||||
tmp = ulist_alloc(GFP_ATOMIC);
|
||||
if (!tmp) {
|
||||
ret = -ENOMEM;
|
||||
goto unlock;
|
||||
}
|
||||
ulist_reinit(fs_info->qgroup_ulist);
|
||||
seq = fs_info->qgroup_seq;
|
||||
fs_info->qgroup_seq += roots->nnodes + 1; /* max refcnt */
|
||||
|
||||
ret = qgroup_account_ref_step1(fs_info, roots, tmp, seq);
|
||||
ret = qgroup_account_ref_step1(fs_info, roots, fs_info->qgroup_ulist,
|
||||
seq);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
/*
|
||||
* step 2: walk from the new root
|
||||
*/
|
||||
ret = qgroup_account_ref_step2(fs_info, roots, tmp, seq, sgn,
|
||||
node->num_bytes, qgroup);
|
||||
ret = qgroup_account_ref_step2(fs_info, roots, fs_info->qgroup_ulist,
|
||||
seq, sgn, node->num_bytes, qgroup);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
/*
|
||||
* step 3: walk again from old refs
|
||||
*/
|
||||
ret = qgroup_account_ref_step3(fs_info, roots, tmp, seq, sgn,
|
||||
node->num_bytes);
|
||||
ret = qgroup_account_ref_step3(fs_info, roots, fs_info->qgroup_ulist,
|
||||
seq, sgn, node->num_bytes);
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
unlock:
|
||||
spin_unlock(&fs_info->qgroup_lock);
|
||||
mutex_unlock(&fs_info->qgroup_rescan_lock);
|
||||
ulist_free(roots);
|
||||
ulist_free(tmp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -1527,9 +1525,12 @@ int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
|
|||
fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
|
||||
|
||||
if (!ret && start_rescan_worker) {
|
||||
ret = btrfs_qgroup_rescan(fs_info);
|
||||
if (ret)
|
||||
pr_err("btrfs: start rescan quota failed: %d\n", ret);
|
||||
ret = qgroup_rescan_init(fs_info, 0, 1);
|
||||
if (!ret) {
|
||||
qgroup_rescan_zero_tracking(fs_info);
|
||||
btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
|
||||
&fs_info->qgroup_rescan_work);
|
||||
}
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
|
@@ -1720,7 +1721,6 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
     struct btrfs_fs_info *fs_info = root->fs_info;
     u64 ref_root = root->root_key.objectid;
     int ret = 0;
-    struct ulist *ulist = NULL;
     struct ulist_node *unode;
     struct ulist_iterator uiter;

@@ -1743,17 +1743,13 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
      * in a first step, we check all affected qgroups if any limits would
      * be exceeded
      */
-    ulist = ulist_alloc(GFP_ATOMIC);
-    if (!ulist) {
-        ret = -ENOMEM;
-        goto out;
-    }
-    ret = ulist_add(ulist, qgroup->qgroupid,
+    ulist_reinit(fs_info->qgroup_ulist);
+    ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
                     (uintptr_t)qgroup, GFP_ATOMIC);
     if (ret < 0)
         goto out;
     ULIST_ITER_INIT(&uiter);
-    while ((unode = ulist_next(ulist, &uiter))) {
+    while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
         struct btrfs_qgroup *qg;
         struct btrfs_qgroup_list *glist;

@@ -1774,7 +1770,8 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
         }

         list_for_each_entry(glist, &qg->groups, next_group) {
-            ret = ulist_add(ulist, glist->group->qgroupid,
+            ret = ulist_add(fs_info->qgroup_ulist,
+                            glist->group->qgroupid,
                             (uintptr_t)glist->group, GFP_ATOMIC);
             if (ret < 0)
                 goto out;

@@ -1785,7 +1782,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
      * no limits exceeded, now record the reservation into all qgroups
      */
     ULIST_ITER_INIT(&uiter);
-    while ((unode = ulist_next(ulist, &uiter))) {
+    while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
         struct btrfs_qgroup *qg;

         qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;

@@ -1795,8 +1792,6 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)

 out:
     spin_unlock(&fs_info->qgroup_lock);
-    ulist_free(ulist);

     return ret;
 }

@@ -1805,7 +1800,6 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
     struct btrfs_root *quota_root;
     struct btrfs_qgroup *qgroup;
     struct btrfs_fs_info *fs_info = root->fs_info;
-    struct ulist *ulist = NULL;
     struct ulist_node *unode;
     struct ulist_iterator uiter;
     u64 ref_root = root->root_key.objectid;

@@ -1827,17 +1821,13 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
     if (!qgroup)
         goto out;

-    ulist = ulist_alloc(GFP_ATOMIC);
-    if (!ulist) {
-        btrfs_std_error(fs_info, -ENOMEM);
-        goto out;
-    }
-    ret = ulist_add(ulist, qgroup->qgroupid,
+    ulist_reinit(fs_info->qgroup_ulist);
+    ret = ulist_add(fs_info->qgroup_ulist, qgroup->qgroupid,
                     (uintptr_t)qgroup, GFP_ATOMIC);
     if (ret < 0)
         goto out;
     ULIST_ITER_INIT(&uiter);
-    while ((unode = ulist_next(ulist, &uiter))) {
+    while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
         struct btrfs_qgroup *qg;
         struct btrfs_qgroup_list *glist;

@@ -1846,7 +1836,8 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
         qg->reserved -= num_bytes;

         list_for_each_entry(glist, &qg->groups, next_group) {
-            ret = ulist_add(ulist, glist->group->qgroupid,
+            ret = ulist_add(fs_info->qgroup_ulist,
+                            glist->group->qgroupid,
                             (uintptr_t)glist->group, GFP_ATOMIC);
             if (ret < 0)
                 goto out;

@@ -1855,7 +1846,6 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)

 out:
     spin_unlock(&fs_info->qgroup_lock);
-    ulist_free(ulist);
 }
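
A note on the pattern in the two hunks above: a GFP_ATOMIC ulist allocation made while holding fs_info->qgroup_lock is replaced with a per-filesystem ulist that is allocated once up front and merely reinitialized on each call, so nothing under the spinlock can fail with -ENOMEM. A minimal userspace sketch of that pattern (plain C, not btrfs code; the names scratch_list, fs and account are hypothetical):

/*
 * Preallocate a scratch collection once, then reinit it under the lock
 * instead of calling an allocator that may fail in atomic context.
 */
#include <pthread.h>
#include <stdlib.h>

struct scratch_list {
    unsigned long *vals;
    size_t cap, len;
};

struct fs {
    pthread_spinlock_t lock;
    struct scratch_list scratch;    /* preallocated at "mount" time */
};

static int scratch_init(struct scratch_list *s, size_t cap)
{
    s->vals = malloc(cap * sizeof(*s->vals));   /* may sleep: fine here */
    s->cap = cap;
    s->len = 0;
    return s->vals ? 0 : -1;
}

static void scratch_reinit(struct scratch_list *s)
{
    s->len = 0;     /* O(1), cannot fail, safe under the spinlock */
}

static void account(struct fs *fs, unsigned long val)
{
    pthread_spin_lock(&fs->lock);
    scratch_reinit(&fs->scratch);   /* was: alloc(GFP_ATOMIC) + error path */
    if (fs->scratch.len < fs->scratch.cap)
        fs->scratch.vals[fs->scratch.len++] = val;
    pthread_spin_unlock(&fs->lock);
}

int main(void)
{
    struct fs fs;
    pthread_spin_init(&fs.lock, PTHREAD_PROCESS_PRIVATE);
    if (scratch_init(&fs.scratch, 64))
        return 1;
    account(&fs, 5);
    free(fs.scratch.vals);
    return 0;
}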
 void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)

@@ -1874,12 +1864,11 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
  * returns 1 when done, 2 when done and FLAG_INCONSISTENT was cleared.
  */
 static int
-qgroup_rescan_leaf(struct qgroup_rescan *qscan, struct btrfs_path *path,
+qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
                    struct btrfs_trans_handle *trans, struct ulist *tmp,
                    struct extent_buffer *scratch_leaf)
 {
     struct btrfs_key found;
-    struct btrfs_fs_info *fs_info = qscan->fs_info;
     struct ulist *roots = NULL;
     struct ulist_node *unode;
     struct ulist_iterator uiter;

@@ -2007,11 +1996,10 @@ out:

 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 {
-    struct qgroup_rescan *qscan = container_of(work, struct qgroup_rescan,
-                                               work);
+    struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
+                                                 qgroup_rescan_work);
     struct btrfs_path *path;
     struct btrfs_trans_handle *trans = NULL;
-    struct btrfs_fs_info *fs_info = qscan->fs_info;
     struct ulist *tmp = NULL;
     struct extent_buffer *scratch_leaf = NULL;
     int err = -ENOMEM;

@@ -2036,7 +2024,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
         if (!fs_info->quota_enabled) {
             err = -EINTR;
         } else {
-            err = qgroup_rescan_leaf(qscan, path, trans,
+            err = qgroup_rescan_leaf(fs_info, path, trans,
                                      tmp, scratch_leaf);
         }
         if (err > 0)

@@ -2049,7 +2037,6 @@ out:
     kfree(scratch_leaf);
     ulist_free(tmp);
     btrfs_free_path(path);
-    kfree(qscan);

     mutex_lock(&fs_info->qgroup_rescan_lock);
     fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;

@@ -2068,47 +2055,74 @@ out:
     } else {
         pr_err("btrfs: qgroup scan failed with %d\n", err);
     }

+    complete_all(&fs_info->qgroup_rescan_completion);
 }

-static void
-qgroup_rescan_start(struct btrfs_fs_info *fs_info, struct qgroup_rescan *qscan)
-{
-    memset(&qscan->work, 0, sizeof(qscan->work));
-    qscan->work.func = btrfs_qgroup_rescan_worker;
-    qscan->fs_info = fs_info;
-
-    pr_info("btrfs: qgroup scan started\n");
-    btrfs_queue_worker(&fs_info->qgroup_rescan_workers, &qscan->work);
-}
-
-int
-btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
+/*
+ * Checks that (a) no rescan is running and (b) quota is enabled. Allocates all
+ * memory required for the rescan context.
+ */
+static int
+qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
+                   int init_flags)
 {
     int ret = 0;
-    struct rb_node *n;
-    struct btrfs_qgroup *qgroup;
-    struct qgroup_rescan *qscan = kmalloc(sizeof(*qscan), GFP_NOFS);

-    if (!qscan)
-        return -ENOMEM;
+    if (!init_flags &&
+        (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN) ||
+         !(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))) {
+        ret = -EINVAL;
+        goto err;
+    }

     mutex_lock(&fs_info->qgroup_rescan_lock);
     spin_lock(&fs_info->qgroup_lock);
-    if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
-        ret = -EINPROGRESS;
-    else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
-        ret = -EINVAL;
+
+    if (init_flags) {
+        if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+            ret = -EINPROGRESS;
+        else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+            ret = -EINVAL;
+
+        if (ret) {
+            spin_unlock(&fs_info->qgroup_lock);
+            mutex_unlock(&fs_info->qgroup_rescan_lock);
+            goto err;
+        }
+
+        fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+    }
+
+    memset(&fs_info->qgroup_rescan_progress, 0,
+           sizeof(fs_info->qgroup_rescan_progress));
+    fs_info->qgroup_rescan_progress.objectid = progress_objectid;
+
+    spin_unlock(&fs_info->qgroup_lock);
+    mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+    init_completion(&fs_info->qgroup_rescan_completion);
+
+    memset(&fs_info->qgroup_rescan_work, 0,
+           sizeof(fs_info->qgroup_rescan_work));
+    fs_info->qgroup_rescan_work.func = btrfs_qgroup_rescan_worker;
+
     if (ret) {
-        spin_unlock(&fs_info->qgroup_lock);
-        mutex_unlock(&fs_info->qgroup_rescan_lock);
-        kfree(qscan);
+err:
+        pr_info("btrfs: qgroup_rescan_init failed with %d\n", ret);
         return ret;
     }

-    fs_info->qgroup_flags |= BTRFS_QGROUP_STATUS_FLAG_RESCAN;
-    memset(&fs_info->qgroup_rescan_progress, 0,
-           sizeof(fs_info->qgroup_rescan_progress));
+    return 0;
+}
+
+static void
+qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
+{
+    struct rb_node *n;
+    struct btrfs_qgroup *qgroup;

+    spin_lock(&fs_info->qgroup_lock);
     /* clear all current qgroup tracking information */
     for (n = rb_first(&fs_info->qgroup_tree); n; n = rb_next(n)) {
         qgroup = rb_entry(n, struct btrfs_qgroup, node);

@@ -2118,9 +2132,74 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
         qgroup->excl_cmpr = 0;
     }
     spin_unlock(&fs_info->qgroup_lock);
-    mutex_unlock(&fs_info->qgroup_rescan_lock);
+}

-    qgroup_rescan_start(fs_info, qscan);
+int
+btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
+{
+    int ret = 0;
+    struct btrfs_trans_handle *trans;
+
+    ret = qgroup_rescan_init(fs_info, 0, 1);
+    if (ret)
+        return ret;
+
+    /*
+     * We have set the rescan_progress to 0, which means no more
+     * delayed refs will be accounted by btrfs_qgroup_account_ref.
+     * However, btrfs_qgroup_account_ref may be right after its call
+     * to btrfs_find_all_roots, in which case it would still do the
+     * accounting.
+     * To solve this, we're committing the transaction, which will
+     * ensure we run all delayed refs and only after that, we are
+     * going to clear all tracking information for a clean start.
+     */
+
+    trans = btrfs_join_transaction(fs_info->fs_root);
+    if (IS_ERR(trans)) {
+        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+        return PTR_ERR(trans);
+    }
+    ret = btrfs_commit_transaction(trans, fs_info->fs_root);
+    if (ret) {
+        fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+        return ret;
+    }
+
+    qgroup_rescan_zero_tracking(fs_info);
+
+    btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
+                       &fs_info->qgroup_rescan_work);

     return 0;
 }
+
+int btrfs_qgroup_wait_for_completion(struct btrfs_fs_info *fs_info)
+{
+    int running;
+    int ret = 0;
+
+    mutex_lock(&fs_info->qgroup_rescan_lock);
+    spin_lock(&fs_info->qgroup_lock);
+    running = fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN;
+    spin_unlock(&fs_info->qgroup_lock);
+    mutex_unlock(&fs_info->qgroup_rescan_lock);
+
+    if (running)
+        ret = wait_for_completion_interruptible(
+            &fs_info->qgroup_rescan_completion);
+
+    return ret;
+}
+
+/*
+ * this is only called from open_ctree where we're still single threaded, thus
+ * locking is omitted here.
+ */
+void
+btrfs_qgroup_rescan_resume(struct btrfs_fs_info *fs_info)
+{
+    if (fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN)
+        btrfs_queue_worker(&fs_info->qgroup_rescan_workers,
+                           &fs_info->qgroup_rescan_work);
+}
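
The new btrfs_qgroup_wait_for_completion above combines a "running" flag checked under locks with a completion that the rescan worker signals when it finishes. A userspace analog of that shape (plain C with pthreads, not kernel code; all names here are hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int rescan_running;

static void *rescan_worker(void *arg)
{
    (void)arg;
    /* ... scan work would happen here ... */
    pthread_mutex_lock(&lock);
    rescan_running = 0;             /* clear the flag, then signal */
    pthread_cond_broadcast(&done);  /* like complete_all() */
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void wait_for_rescan(void)
{
    pthread_mutex_lock(&lock);
    while (rescan_running)          /* only wait if a scan is running */
        pthread_cond_wait(&done, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;
    rescan_running = 1;
    pthread_create(&t, NULL, rescan_worker, NULL);
    wait_for_rescan();
    pthread_join(t, NULL);
    puts("rescan finished");
    return 0;
}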

fs/btrfs/relocation.c
@@ -1305,6 +1305,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
     struct extent_buffer *eb;
     struct btrfs_root_item *root_item;
     struct btrfs_key root_key;
+    u64 last_snap = 0;
     int ret;

     root_item = kmalloc(sizeof(*root_item), GFP_NOFS);

@@ -1320,6 +1321,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
                                       BTRFS_TREE_RELOC_OBJECTID);
         BUG_ON(ret);

+        last_snap = btrfs_root_last_snapshot(&root->root_item);
         btrfs_set_root_last_snapshot(&root->root_item,
                                      trans->transid - 1);
     } else {

@@ -1345,6 +1347,12 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
     memset(&root_item->drop_progress, 0,
            sizeof(struct btrfs_disk_key));
     root_item->drop_level = 0;
+    /*
+     * abuse rtransid, it is safe because it is impossible to
+     * receive data into a relocation tree.
+     */
+    btrfs_set_root_rtransid(root_item, last_snap);
+    btrfs_set_root_otransid(root_item, trans->transid);
 }

 btrfs_tree_unlock(eb);

@@ -1355,8 +1363,7 @@ static struct btrfs_root *create_reloc_root(struct btrfs_trans_handle *trans,
     BUG_ON(ret);
     kfree(root_item);

-    reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
-                                             &root_key);
+    reloc_root = btrfs_read_fs_root(root->fs_info->tree_root, &root_key);
     BUG_ON(IS_ERR(reloc_root));
     reloc_root->last_trans = trans->transid;
     return reloc_root;

@@ -2273,8 +2280,12 @@ void free_reloc_roots(struct list_head *list)
 static noinline_for_stack
 int merge_reloc_roots(struct reloc_control *rc)
 {
+    struct btrfs_trans_handle *trans;
     struct btrfs_root *root;
     struct btrfs_root *reloc_root;
+    u64 last_snap;
+    u64 otransid;
+    u64 objectid;
     LIST_HEAD(reloc_roots);
     int found = 0;
     int ret = 0;

@@ -2308,12 +2319,44 @@ again:
         } else {
             list_del_init(&reloc_root->root_list);
         }
+
+        /*
+         * we keep the old last snapshot transid in rtransid when we
+         * created the relocation tree.
+         */
+        last_snap = btrfs_root_rtransid(&reloc_root->root_item);
+        otransid = btrfs_root_otransid(&reloc_root->root_item);
+        objectid = reloc_root->root_key.offset;
+
         ret = btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
         if (ret < 0) {
             if (list_empty(&reloc_root->root_list))
                 list_add_tail(&reloc_root->root_list,
                               &reloc_roots);
             goto out;
-        }
+        } else if (!ret) {
+            /*
+             * recover the last snapshot transid to avoid
+             * the space balance breaking NOCOW.
+             */
+            root = read_fs_root(rc->extent_root->fs_info,
+                                objectid);
+            if (IS_ERR(root))
+                continue;
+
+            if (btrfs_root_refs(&root->root_item) == 0)
+                continue;
+
+            trans = btrfs_join_transaction(root);
+            BUG_ON(IS_ERR(trans));
+
+            /* Check if the fs/file tree was snapshotted or not. */
+            if (btrfs_root_last_snapshot(&root->root_item) ==
+                otransid - 1)
+                btrfs_set_root_last_snapshot(&root->root_item,
+                                             last_snap);
+
+            btrfs_end_transaction(trans, root);
+        }
     }

@@ -3266,6 +3309,8 @@ static int __add_tree_block(struct reloc_control *rc,
     struct btrfs_path *path;
     struct btrfs_key key;
     int ret;
+    bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
+                                    SKINNY_METADATA);

     if (tree_block_processed(bytenr, blocksize, rc))
         return 0;

@@ -3276,10 +3321,15 @@ static int __add_tree_block(struct reloc_control *rc,
     path = btrfs_alloc_path();
     if (!path)
         return -ENOMEM;

+again:
     key.objectid = bytenr;
-    key.type = BTRFS_EXTENT_ITEM_KEY;
-    key.offset = blocksize;
+    if (skinny) {
+        key.type = BTRFS_METADATA_ITEM_KEY;
+        key.offset = (u64)-1;
+    } else {
+        key.type = BTRFS_EXTENT_ITEM_KEY;
+        key.offset = blocksize;
+    }

     path->search_commit_root = 1;
     path->skip_locking = 1;

@@ -3287,11 +3337,23 @@ static int __add_tree_block(struct reloc_control *rc,
     if (ret < 0)
         goto out;

-    btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
-    if (ret > 0) {
-        if (key.objectid == bytenr &&
-            key.type == BTRFS_METADATA_ITEM_KEY)
-            ret = 0;
+    if (ret > 0 && skinny) {
+        if (path->slots[0]) {
+            path->slots[0]--;
+            btrfs_item_key_to_cpu(path->nodes[0], &key,
+                                  path->slots[0]);
+            if (key.objectid == bytenr &&
+                (key.type == BTRFS_METADATA_ITEM_KEY ||
+                 (key.type == BTRFS_EXTENT_ITEM_KEY &&
+                  key.offset == blocksize)))
+                ret = 0;
+        }
+
+        if (ret) {
+            skinny = false;
+            btrfs_release_path(path);
+            goto again;
+        }
     }
     BUG_ON(ret);

@@ -4160,12 +4222,12 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start)
            (unsigned long long)rc->block_group->key.objectid,
            (unsigned long long)rc->block_group->flags);

-    ret = btrfs_start_delalloc_inodes(fs_info->tree_root, 0);
+    ret = btrfs_start_all_delalloc_inodes(fs_info, 0);
     if (ret < 0) {
         err = ret;
         goto out;
     }
-    btrfs_wait_ordered_extents(fs_info->tree_root, 0);
+    btrfs_wait_all_ordered_extents(fs_info, 0);

     while (1) {
         mutex_lock(&fs_info->cleaner_mutex);

@@ -4277,7 +4339,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
             key.type != BTRFS_ROOT_ITEM_KEY)
             break;

-        reloc_root = btrfs_read_fs_root_no_radix(root, &key);
+        reloc_root = btrfs_read_fs_root(root, &key);
         if (IS_ERR(reloc_root)) {
             err = PTR_ERR(reloc_root);
             goto out;

@@ -4396,10 +4458,8 @@ out:
 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
 {
     struct btrfs_ordered_sum *sums;
-    struct btrfs_sector_sum *sector_sum;
     struct btrfs_ordered_extent *ordered;
     struct btrfs_root *root = BTRFS_I(inode)->root;
-    size_t offset;
     int ret;
     u64 disk_bytenr;
     LIST_HEAD(list);

@@ -4413,19 +4473,13 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
     if (ret)
         goto out;

+    disk_bytenr = ordered->start;
     while (!list_empty(&list)) {
         sums = list_entry(list.next, struct btrfs_ordered_sum, list);
         list_del_init(&sums->list);

-        sector_sum = sums->sums;
-        sums->bytenr = ordered->start;
-
-        offset = 0;
-        while (offset < sums->len) {
-            sector_sum->bytenr += ordered->start - disk_bytenr;
-            sector_sum++;
-            offset += root->sectorsize;
-        }
+        sums->bytenr = disk_bytenr;
+        disk_bytenr += sums->len;

         btrfs_add_ordered_sum(inode, ordered, sums);
     }

fs/btrfs/root-tree.c
@@ -64,52 +64,59 @@ void btrfs_read_root_item(struct extent_buffer *eb, int slot,
 }

 /*
- * lookup the root with the highest offset for a given objectid. The key we do
- * find is copied into 'key'. If we find something return 0, otherwise 1, < 0
- * on error.
+ * btrfs_find_root - lookup the root by the key.
+ * root: the root of the root tree
+ * search_key: the key to search
+ * path: the path we search
+ * root_item: the root item of the tree we look for
+ * root_key: the real key of the tree we look for
+ *
+ * If ->offset of 'search_key' is -1ULL, it means we are not sure the offset
+ * of the search key, just lookup the root with the highest offset for a
+ * given objectid.
+ *
+ * If we find something return 0, otherwise > 0, < 0 on error.
 */
-int btrfs_find_last_root(struct btrfs_root *root, u64 objectid,
-                         struct btrfs_root_item *item, struct btrfs_key *key)
+int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
+                    struct btrfs_path *path, struct btrfs_root_item *root_item,
+                    struct btrfs_key *root_key)
 {
-    struct btrfs_path *path;
-    struct btrfs_key search_key;
     struct btrfs_key found_key;
     struct extent_buffer *l;
     int ret;
     int slot;

-    search_key.objectid = objectid;
-    search_key.type = BTRFS_ROOT_ITEM_KEY;
-    search_key.offset = (u64)-1;
-
-    path = btrfs_alloc_path();
-    if (!path)
-        return -ENOMEM;
-    ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
+    ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
     if (ret < 0)
-        goto out;
+        return ret;

-    BUG_ON(ret == 0);
-    if (path->slots[0] == 0) {
-        ret = 1;
-        goto out;
+    if (search_key->offset != -1ULL) {  /* the search key is exact */
+        if (ret > 0)
+            goto out;
+    } else {
+        BUG_ON(ret == 0);   /* Logical error */
+        if (path->slots[0] == 0)
+            goto out;
+        path->slots[0]--;
+        ret = 0;
     }

     l = path->nodes[0];
-    slot = path->slots[0] - 1;
+    slot = path->slots[0];

     btrfs_item_key_to_cpu(l, &found_key, slot);
-    if (found_key.objectid != objectid ||
+    if (found_key.objectid != search_key->objectid ||
         found_key.type != BTRFS_ROOT_ITEM_KEY) {
         ret = 1;
         goto out;
     }

-    if (item)
-        btrfs_read_root_item(l, slot, item);
-    if (key)
-        memcpy(key, &found_key, sizeof(found_key));
-
-    ret = 0;
+    if (root_item)
+        btrfs_read_root_item(l, slot, root_item);
+    if (root_key)
+        memcpy(root_key, &found_key, sizeof(found_key));
 out:
-    btrfs_free_path(path);
+    btrfs_release_path(path);
     return ret;
 }
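
The offset = -1ULL mode of btrfs_find_root above uses a classic btree idiom: search for a key larger than anything that can exist, then step back one slot to land on the last real item for that objectid. A minimal runnable model of the same idiom on a sorted array (plain C, not btrfs code):

#include <stdio.h>

/* returns index of first element >= target (like btrfs_search_slot ret > 0) */
static int lower_bound(const unsigned long *keys, int n, unsigned long target)
{
    int lo = 0, hi = n;
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;
        if (keys[mid] < target)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}

/* find the largest key whose "objectid" (high bits) matches id */
static int find_last(const unsigned long *keys, int n, unsigned long id)
{
    int slot = lower_bound(keys, n, (id << 8) | 0xff);  /* offset = max */
    if (slot == 0)
        return -1;      /* nothing at or below the target */
    slot--;             /* step back, exactly as the diff does */
    if ((keys[slot] >> 8) != id)
        return -1;      /* previous item belongs to another objectid */
    return slot;
}

int main(void)
{
    /* key = (objectid << 8) | offset, kept sorted */
    unsigned long keys[] = { 0x0101, 0x0102, 0x0203, 0x0210 };
    int slot = find_last(keys, 4, 2);
    if (slot >= 0)
        printf("last root item for objectid 2 at slot %d\n", slot);
    return 0;
}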

@@ -212,86 +219,6 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
     return btrfs_insert_item(trans, root, key, item, sizeof(*item));
 }

-/*
- * at mount time we want to find all the old transaction snapshots that were in
- * the process of being deleted if we crashed. This is any root item with an
- * offset lower than the latest root. They need to be queued for deletion to
- * finish what was happening when we crashed.
- */
-int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid)
-{
-    struct btrfs_root *dead_root;
-    struct btrfs_root_item *ri;
-    struct btrfs_key key;
-    struct btrfs_key found_key;
-    struct btrfs_path *path;
-    int ret;
-    u32 nritems;
-    struct extent_buffer *leaf;
-    int slot;
-
-    key.objectid = objectid;
-    btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
-    key.offset = 0;
-    path = btrfs_alloc_path();
-    if (!path)
-        return -ENOMEM;
-
-again:
-    ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-    if (ret < 0)
-        goto err;
-    while (1) {
-        leaf = path->nodes[0];
-        nritems = btrfs_header_nritems(leaf);
-        slot = path->slots[0];
-        if (slot >= nritems) {
-            ret = btrfs_next_leaf(root, path);
-            if (ret)
-                break;
-            leaf = path->nodes[0];
-            nritems = btrfs_header_nritems(leaf);
-            slot = path->slots[0];
-        }
-        btrfs_item_key_to_cpu(leaf, &key, slot);
-        if (btrfs_key_type(&key) != BTRFS_ROOT_ITEM_KEY)
-            goto next;
-
-        if (key.objectid < objectid)
-            goto next;
-
-        if (key.objectid > objectid)
-            break;
-
-        ri = btrfs_item_ptr(leaf, slot, struct btrfs_root_item);
-        if (btrfs_disk_root_refs(leaf, ri) != 0)
-            goto next;
-
-        memcpy(&found_key, &key, sizeof(key));
-        key.offset++;
-        btrfs_release_path(path);
-        dead_root =
-            btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
-                                        &found_key);
-        if (IS_ERR(dead_root)) {
-            ret = PTR_ERR(dead_root);
-            goto err;
-        }
-
-        ret = btrfs_add_dead_root(dead_root);
-        if (ret)
-            goto err;
-        goto again;
-next:
-        slot++;
-        path->slots[0]++;
-    }
-    ret = 0;
-err:
-    btrfs_free_path(path);
-    return ret;
-}
-
 int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
 {
     struct extent_buffer *leaf;

@@ -301,6 +228,10 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
     struct btrfs_root *root;
     int err = 0;
     int ret;
+    bool can_recover = true;
+
+    if (tree_root->fs_info->sb->s_flags & MS_RDONLY)
+        can_recover = false;

     path = btrfs_alloc_path();
     if (!path)

@@ -340,20 +271,52 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
         root_key.objectid = key.offset;
         key.offset++;

-        root = btrfs_read_fs_root_no_name(tree_root->fs_info,
-                                          &root_key);
-        if (!IS_ERR(root))
-            continue;
-
-        ret = PTR_ERR(root);
-        if (ret != -ENOENT) {
-            err = ret;
+        root = btrfs_read_fs_root(tree_root, &root_key);
+        err = PTR_RET(root);
+        if (err && err != -ENOENT) {
+            break;
+        } else if (err == -ENOENT) {
+            struct btrfs_trans_handle *trans;
+
+            btrfs_release_path(path);
+
+            trans = btrfs_join_transaction(tree_root);
+            if (IS_ERR(trans)) {
+                err = PTR_ERR(trans);
+                btrfs_error(tree_root->fs_info, err,
+                            "Failed to start trans to delete "
+                            "orphan item");
+                break;
+            }
+            err = btrfs_del_orphan_item(trans, tree_root,
+                                        root_key.objectid);
+            btrfs_end_transaction(trans, tree_root);
+            if (err) {
+                btrfs_error(tree_root->fs_info, err,
+                            "Failed to delete root orphan "
+                            "item");
+                break;
+            }
+            continue;
         }

-        ret = btrfs_find_dead_roots(tree_root, root_key.objectid);
-        if (ret) {
-            err = ret;
+        if (btrfs_root_refs(&root->root_item) == 0) {
+            btrfs_add_dead_root(root);
+            continue;
+        }
+
+        err = btrfs_init_fs_root(root);
+        if (err) {
+            btrfs_free_fs_root(root);
+            break;
+        }
+
+        root->orphan_item_inserted = 1;
+
+        err = btrfs_insert_fs_root(root->fs_info, root);
+        if (err) {
+            BUG_ON(err == -EEXIST);
+            btrfs_free_fs_root(root);
             break;
         }
     }

@@ -368,8 +331,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 {
     struct btrfs_path *path;
     int ret;
-    struct btrfs_root_item *ri;
-    struct extent_buffer *leaf;

     path = btrfs_alloc_path();
     if (!path)

@@ -379,8 +340,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
         goto out;

     BUG_ON(ret != 0);
-    leaf = path->nodes[0];
-    ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);

     ret = btrfs_del_item(trans, root, path);
 out:

fs/btrfs/scrub.c
@@ -2126,8 +2126,7 @@ static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
                            u8 *csum)
 {
     struct btrfs_ordered_sum *sum = NULL;
-    int ret = 0;
-    unsigned long i;
+    unsigned long index;
     unsigned long num_sectors;

     while (!list_empty(&sctx->csum_list)) {

@@ -2146,19 +2145,14 @@ static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
     if (!sum)
         return 0;

+    index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
     num_sectors = sum->len / sctx->sectorsize;
-    for (i = 0; i < num_sectors; ++i) {
-        if (sum->sums[i].bytenr == logical) {
-            memcpy(csum, &sum->sums[i].sum, sctx->csum_size);
-            ret = 1;
-            break;
-        }
-    }
-    if (ret && i == num_sectors - 1) {
+    memcpy(csum, sum->sums + index, sctx->csum_size);
+    if (index == num_sectors - 1) {
         list_del(&sum->list);
         kfree(sum);
     }
-    return ret;
+    return 1;
 }
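
The scrub_find_csum hunk above is the payoff of dropping btrfs_sector_sum: once checksums sit in a flat array ordered by disk offset, a single division replaces the linear scan. A standalone illustration of that arithmetic (plain C, not btrfs code; the struct and sizes are simplified stand-ins):

#include <stdio.h>
#include <string.h>

#define SECTORSIZE 4096
#define CSUM_SIZE  4    /* crc32c */

struct ordered_sum {    /* simplified stand-in for btrfs_ordered_sum */
    unsigned long long bytenr;          /* disk offset of the first sector */
    unsigned int len;                   /* bytes covered */
    unsigned char sums[8 * CSUM_SIZE];  /* one csum per sector */
};

static int find_csum(const struct ordered_sum *sum, unsigned long long logical,
                     unsigned char *csum, int *last_sector)
{
    unsigned long index, num_sectors;

    if (logical < sum->bytenr || logical >= sum->bytenr + sum->len)
        return 0;   /* not covered by this entry */

    index = (logical - sum->bytenr) / SECTORSIZE;
    num_sectors = sum->len / SECTORSIZE;
    memcpy(csum, sum->sums + index * CSUM_SIZE, CSUM_SIZE);
    *last_sector = (index == num_sectors - 1);  /* the diff frees the entry here */
    return 1;
}

int main(void)
{
    struct ordered_sum sum = { .bytenr = 1 << 20, .len = 8 * SECTORSIZE };
    unsigned char csum[CSUM_SIZE];
    int last = 0;

    memset(sum.sums, 0xab, sizeof(sum.sums));
    if (find_csum(&sum, (1 << 20) + 7 * SECTORSIZE, csum, &last))
        printf("found csum, last sector? %d\n", last);
    return 0;
}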

 /* scrub extent tries to collect up to 64 kB for each bio */

@@ -2505,6 +2499,7 @@ again:
     if (ret)
         goto out;

+    scrub_free_csums(sctx);
     if (extent_logical + extent_len <
         key.objectid + bytes) {
         logical += increment;

@@ -3204,16 +3199,18 @@ out:

 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
 {
-    unsigned long index;
     struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
-    int ret = 0;
+    struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
     struct btrfs_key key;
-    struct inode *inode = NULL;
+    struct inode *inode;
+    struct page *page;
     struct btrfs_root *local_root;
     u64 physical_for_dev_replace;
     u64 len;
-    struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
+    unsigned long index;
     int srcu_index;
+    int ret;
+    int err;

     key.objectid = root;
     key.type = BTRFS_ROOT_ITEM_KEY;

@@ -3227,6 +3224,11 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
         return PTR_ERR(local_root);
     }

+    if (btrfs_root_refs(&local_root->root_item) == 0) {
+        srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
+        return -ENOENT;
+    }
+
     key.type = BTRFS_INODE_ITEM_KEY;
     key.objectid = inum;
     key.offset = 0;

@@ -3235,19 +3237,21 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
     if (IS_ERR(inode))
         return PTR_ERR(inode);

+    /* Avoid truncate/dio/punch hole.. */
+    mutex_lock(&inode->i_mutex);
+    inode_dio_wait(inode);
+
+    ret = 0;
     physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
     len = nocow_ctx->len;
     while (len >= PAGE_CACHE_SIZE) {
-        struct page *page = NULL;
-        int ret_sub;
-
         index = offset >> PAGE_CACHE_SHIFT;
+again:
         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
         if (!page) {
             pr_err("find_or_create_page() failed\n");
             ret = -ENOMEM;
-            goto next_page;
+            goto out;
         }

         if (PageUptodate(page)) {

@@ -3255,39 +3259,49 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
             goto next_page;
         } else {
             ClearPageError(page);
-            ret_sub = extent_read_full_page(&BTRFS_I(inode)->
+            err = extent_read_full_page(&BTRFS_I(inode)->
                                             io_tree,
                                             page, btrfs_get_extent,
                                             nocow_ctx->mirror_num);
-            if (ret_sub) {
-                ret = ret_sub;
+            if (err) {
+                ret = err;
                 goto next_page;
             }
-            wait_on_page_locked(page);
+
+            lock_page(page);
+            /*
+             * If the page has been removed from the page cache,
+             * the data on it is meaningless: it may be stale,
+             * and the new data may have been written into a new
+             * page in the page cache.
+             */
+            if (page->mapping != inode->i_mapping) {
+                page_cache_release(page);
+                goto again;
+            }
             if (!PageUptodate(page)) {
                 ret = -EIO;
                 goto next_page;
             }
         }
-        ret_sub = write_page_nocow(nocow_ctx->sctx,
-                                   physical_for_dev_replace, page);
-        if (ret_sub) {
-            ret = ret_sub;
-            goto next_page;
-        }
+
+        err = write_page_nocow(nocow_ctx->sctx,
+                               physical_for_dev_replace, page);
+        if (err)
+            ret = err;
 next_page:
-        if (page) {
-            unlock_page(page);
-            put_page(page);
-        }
+        unlock_page(page);
+        page_cache_release(page);
+
+        if (ret)
+            break;
+
         offset += PAGE_CACHE_SIZE;
         physical_for_dev_replace += PAGE_CACHE_SIZE;
         len -= PAGE_CACHE_SIZE;
     }
-
-    if (inode)
-        iput(inode);
+out:
+    mutex_unlock(&inode->i_mutex);
+    iput(inode);
     return ret;
 }
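
The lock_page/mapping-check/retry sequence introduced above is a general revalidation pattern: after blocking to acquire an object's lock, verify the object still belongs to the container, since it may have been replaced while you slept. A userspace model of the same shape (plain C with pthreads, not kernel code; all names are hypothetical):

#include <pthread.h>
#include <stdio.h>

struct cache;

struct page_like {
    pthread_mutex_t lock;
    struct cache *owner;    /* NULL or another cache once removed */
    int data;
};

struct cache {
    pthread_mutex_t lock;
    struct page_like *slot; /* single-slot "page cache" */
};

static struct page_like *find_page(struct cache *c)
{
    struct page_like *p;
    pthread_mutex_lock(&c->lock);
    p = c->slot;            /* grab the entry while the cache is locked */
    pthread_mutex_unlock(&c->lock);
    return p;
}

static int read_data(struct cache *c)
{
    struct page_like *p;
again:
    p = find_page(c);
    if (!p)
        return -1;
    pthread_mutex_lock(&p->lock);   /* may block while others run */
    if (p->owner != c) {            /* replaced while we slept? */
        pthread_mutex_unlock(&p->lock);
        goto again;                 /* like the page->mapping check + retry */
    }
    printf("data=%d\n", p->data);
    pthread_mutex_unlock(&p->lock);
    return 0;
}

int main(void)
{
    struct page_like p = { PTHREAD_MUTEX_INITIALIZER, NULL, 42 };
    struct cache c = { PTHREAD_MUTEX_INITIALIZER, &p };
    p.owner = &c;
    return read_data(&c) ? 1 : 0;
}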

fs/btrfs/send.c (235 changed lines)
@@ -158,7 +158,7 @@ static void fs_path_reset(struct fs_path *p)
     }
 }

-static struct fs_path *fs_path_alloc(struct send_ctx *sctx)
+static struct fs_path *fs_path_alloc(void)
 {
     struct fs_path *p;

@@ -173,11 +173,11 @@ static struct fs_path *fs_path_alloc(struct send_ctx *sctx)
     return p;
 }

-static struct fs_path *fs_path_alloc_reversed(struct send_ctx *sctx)
+static struct fs_path *fs_path_alloc_reversed(void)
 {
     struct fs_path *p;

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return NULL;
     p->reversed = 1;

@@ -185,7 +185,7 @@ static struct fs_path *fs_path_alloc_reversed(struct send_ctx *sctx)
     return p;
 }

-static void fs_path_free(struct send_ctx *sctx, struct fs_path *p)
+static void fs_path_free(struct fs_path *p)
 {
     if (!p)
         return;

@@ -753,8 +753,7 @@ typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
  *
  * path must point to the INODE_REF or INODE_EXTREF when called.
  */
-static int iterate_inode_ref(struct send_ctx *sctx,
-                             struct btrfs_root *root, struct btrfs_path *path,
+static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
                              struct btrfs_key *found_key, int resolve,
                              iterate_inode_ref_t iterate, void *ctx)
 {

@@ -777,13 +776,13 @@ static int iterate_inode_ref(struct send_ctx *sctx,
     unsigned long elem_size;
     unsigned long ptr;

-    p = fs_path_alloc_reversed(sctx);
+    p = fs_path_alloc_reversed();
     if (!p)
         return -ENOMEM;

     tmp_path = alloc_path_for_send();
     if (!tmp_path) {
-        fs_path_free(sctx, p);
+        fs_path_free(p);
         return -ENOMEM;
     }

@@ -858,7 +857,7 @@ static int iterate_inode_ref(struct send_ctx *sctx,

 out:
     btrfs_free_path(tmp_path);
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -874,8 +873,7 @@ typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
  *
  * path must point to the dir item when called.
  */
-static int iterate_dir_item(struct send_ctx *sctx,
-                            struct btrfs_root *root, struct btrfs_path *path,
+static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
                             struct btrfs_key *found_key,
                             iterate_dir_item_t iterate, void *ctx)
 {

@@ -990,7 +988,7 @@ static int __copy_first_ref(int num, u64 dir, int index,
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
-static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
+static int get_inode_path(struct btrfs_root *root,
                           u64 ino, struct fs_path *path)
 {
     int ret;

@@ -1022,8 +1020,8 @@ static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
         goto out;
     }

-    ret = iterate_inode_ref(sctx, root, p, &found_key, 1,
-                            __copy_first_ref, path);
+    ret = iterate_inode_ref(root, p, &found_key, 1,
+                            __copy_first_ref, path);
     if (ret < 0)
         goto out;
     ret = 0;

@@ -1314,8 +1312,7 @@ out:
     return ret;
 }

-static int read_symlink(struct send_ctx *sctx,
-                        struct btrfs_root *root,
+static int read_symlink(struct btrfs_root *root,
                         u64 ino,
                         struct fs_path *dest)
 {

@@ -1562,8 +1559,7 @@ out:
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
 */
-static int get_first_ref(struct send_ctx *sctx,
-                         struct btrfs_root *root, u64 ino,
+static int get_first_ref(struct btrfs_root *root, u64 ino,
                          u64 *dir, u64 *dir_gen, struct fs_path *name)
 {
     int ret;

@@ -1628,8 +1624,7 @@ out:
     return ret;
 }

-static int is_first_ref(struct send_ctx *sctx,
-                        struct btrfs_root *root,
+static int is_first_ref(struct btrfs_root *root,
                         u64 ino, u64 dir,
                         const char *name, int name_len)
 {

@@ -1638,11 +1633,11 @@ static int is_first_ref(struct send_ctx *sctx,
     u64 tmp_dir;
     u64 tmp_dir_gen;

-    tmp_name = fs_path_alloc(sctx);
+    tmp_name = fs_path_alloc();
     if (!tmp_name)
         return -ENOMEM;

-    ret = get_first_ref(sctx, root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
+    ret = get_first_ref(root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
     if (ret < 0)
         goto out;

@@ -1654,7 +1649,7 @@ static int is_first_ref(struct send_ctx *sctx,
     ret = !memcmp(tmp_name->start, name, name_len);

 out:
-    fs_path_free(sctx, tmp_name);
+    fs_path_free(tmp_name);
     return ret;
 }

@@ -1783,11 +1778,11 @@ static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
     if (!sctx->parent_root)
         goto out;

-    name = fs_path_alloc(sctx);
+    name = fs_path_alloc();
     if (!name)
         return -ENOMEM;

-    ret = get_first_ref(sctx, sctx->parent_root, ino, &dir, &dir_gen, name);
+    ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
     if (ret < 0)
         goto out;

@@ -1795,7 +1790,7 @@ static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
                  name->start, fs_path_len(name));

 out:
-    fs_path_free(sctx, name);
+    fs_path_free(name);
     return ret;
 }

@@ -1979,11 +1974,11 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
      * send_root or parent_root for ref lookup.
      */
     if (ino < sctx->send_progress)
-        ret = get_first_ref(sctx, sctx->send_root, ino,
-                            parent_ino, parent_gen, dest);
+        ret = get_first_ref(sctx->send_root, ino,
+                            parent_ino, parent_gen, dest);
     else
-        ret = get_first_ref(sctx, sctx->parent_root, ino,
-                            parent_ino, parent_gen, dest);
+        ret = get_first_ref(sctx->parent_root, ino,
+                            parent_ino, parent_gen, dest);
     if (ret < 0)
         goto out;

@@ -2070,7 +2065,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
     u64 parent_gen = 0;
     int stop = 0;

-    name = fs_path_alloc(sctx);
+    name = fs_path_alloc();
     if (!name) {
         ret = -ENOMEM;
         goto out;

@@ -2098,7 +2093,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
     }

 out:
-    fs_path_free(sctx, name);
+    fs_path_free(name);
     if (!ret)
         fs_path_unreverse(dest);
     return ret;

@@ -2263,7 +2258,7 @@ static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)

 verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -2281,7 +2276,7 @@ verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

 tlv_put_failure:
 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -2292,7 +2287,7 @@ static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)

 verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -2310,7 +2305,7 @@ verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

 tlv_put_failure:
 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -2321,7 +2316,7 @@ static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)

 verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -2340,7 +2335,7 @@ verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

 tlv_put_failure:
 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -2356,7 +2351,7 @@ static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)

 verbose_printk("btrfs: send_utimes %llu\n", ino);

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -2397,7 +2392,7 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);

 tlv_put_failure:
 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     btrfs_free_path(path);
     return ret;
 }

@@ -2418,7 +2413,7 @@ static int send_create_inode(struct send_ctx *sctx, u64 ino)

 verbose_printk("btrfs: send_create_inode %llu\n", ino);

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -2459,7 +2454,7 @@ verbose_printk("btrfs: send_create_inode %llu\n", ino);

     if (S_ISLNK(mode)) {
         fs_path_reset(p);
-        ret = read_symlink(sctx, sctx->send_root, ino, p);
+        ret = read_symlink(sctx->send_root, ino, p);
         if (ret < 0)
             goto out;
         TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);

@@ -2476,7 +2471,7 @@ verbose_printk("btrfs: send_create_inode %llu\n", ino);

 tlv_put_failure:
 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -2615,13 +2610,13 @@ static int record_ref(struct list_head *head, u64 dir,
     return 0;
 }

-static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head)
+static void __free_recorded_refs(struct list_head *head)
 {
     struct recorded_ref *cur;

     while (!list_empty(head)) {
         cur = list_entry(head->next, struct recorded_ref, list);
-        fs_path_free(sctx, cur->full_path);
+        fs_path_free(cur->full_path);
         list_del(&cur->list);
         kfree(cur);
     }

@@ -2629,8 +2624,8 @@ static void __free_recorded_refs(struct send_ctx *sctx, struct list_head *head)

 static void free_recorded_refs(struct send_ctx *sctx)
 {
-    __free_recorded_refs(sctx, &sctx->new_refs);
-    __free_recorded_refs(sctx, &sctx->deleted_refs);
+    __free_recorded_refs(&sctx->new_refs);
+    __free_recorded_refs(&sctx->deleted_refs);
 }

 /*

@@ -2644,7 +2639,7 @@ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
     int ret;
     struct fs_path *orphan;

-    orphan = fs_path_alloc(sctx);
+    orphan = fs_path_alloc();
     if (!orphan)
         return -ENOMEM;

@@ -2655,7 +2650,7 @@ static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
     ret = send_rename(sctx, path, orphan);

 out:
-    fs_path_free(sctx, orphan);
+    fs_path_free(orphan);
     return ret;
 }

@@ -2746,7 +2741,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
      */
     BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);

-    valid_path = fs_path_alloc(sctx);
+    valid_path = fs_path_alloc();
     if (!valid_path) {
         ret = -ENOMEM;
         goto out;

@@ -2843,9 +2838,9 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
             if (ret < 0)
                 goto out;
             if (ret) {
-                ret = is_first_ref(sctx, sctx->parent_root,
-                                   ow_inode, cur->dir, cur->name,
-                                   cur->name_len);
+                ret = is_first_ref(sctx->parent_root,
+                                   ow_inode, cur->dir, cur->name,
+                                   cur->name_len);
                 if (ret < 0)
                     goto out;
                 if (ret) {

@@ -3024,7 +3019,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
 out:
     free_recorded_refs(sctx);
     ulist_free(check_dirs);
-    fs_path_free(sctx, valid_path);
+    fs_path_free(valid_path);
     return ret;
 }

@@ -3037,7 +3032,7 @@ static int __record_new_ref(int num, u64 dir, int index,
     struct fs_path *p;
     u64 gen;

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -3057,7 +3052,7 @@ static int __record_new_ref(int num, u64 dir, int index,

 out:
     if (ret)
-        fs_path_free(sctx, p);
+        fs_path_free(p);
     return ret;
 }

@@ -3070,7 +3065,7 @@ static int __record_deleted_ref(int num, u64 dir, int index,
     struct fs_path *p;
     u64 gen;

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -3090,7 +3085,7 @@ static int __record_deleted_ref(int num, u64 dir, int index,

 out:
     if (ret)
-        fs_path_free(sctx, p);
+        fs_path_free(p);
     return ret;
 }

@@ -3098,8 +3093,8 @@ static int record_new_ref(struct send_ctx *sctx)
 {
     int ret;

-    ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path,
-                            sctx->cmp_key, 0, __record_new_ref, sctx);
+    ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
+                            sctx->cmp_key, 0, __record_new_ref, sctx);
     if (ret < 0)
         goto out;
     ret = 0;

@@ -3112,8 +3107,8 @@ static int record_deleted_ref(struct send_ctx *sctx)
 {
     int ret;

-    ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path,
-                            sctx->cmp_key, 0, __record_deleted_ref, sctx);
+    ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
+                            sctx->cmp_key, 0, __record_deleted_ref, sctx);
     if (ret < 0)
         goto out;
     ret = 0;

@@ -3142,8 +3137,7 @@ static int __find_iref(int num, u64 dir, int index,
     return 0;
 }

-static int find_iref(struct send_ctx *sctx,
-                     struct btrfs_root *root,
+static int find_iref(struct btrfs_root *root,
                      struct btrfs_path *path,
                      struct btrfs_key *key,
                      u64 dir, struct fs_path *name)

@@ -3155,7 +3149,7 @@ static int find_iref(struct send_ctx *sctx,
     ctx.name = name;
     ctx.found_idx = -1;

-    ret = iterate_inode_ref(sctx, root, path, key, 0, __find_iref, &ctx);
+    ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
     if (ret < 0)
         return ret;

@@ -3172,7 +3166,7 @@ static int __record_changed_new_ref(int num, u64 dir, int index,
     int ret;
     struct send_ctx *sctx = ctx;

-    ret = find_iref(sctx, sctx->parent_root, sctx->right_path,
+    ret = find_iref(sctx->parent_root, sctx->right_path,
                     sctx->cmp_key, dir, name);
     if (ret == -ENOENT)
         ret = __record_new_ref(num, dir, index, name, sctx);

@@ -3189,7 +3183,7 @@ static int __record_changed_deleted_ref(int num, u64 dir, int index,
     int ret;
     struct send_ctx *sctx = ctx;

-    ret = find_iref(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key,
-                    dir, name);
+    ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
+                    dir, name);
     if (ret == -ENOENT)
         ret = __record_deleted_ref(num, dir, index, name, sctx);

@@ -3203,11 +3197,11 @@ static int record_changed_ref(struct send_ctx *sctx)
 {
     int ret = 0;

-    ret = iterate_inode_ref(sctx, sctx->send_root, sctx->left_path,
+    ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
                             sctx->cmp_key, 0, __record_changed_new_ref, sctx);
     if (ret < 0)
         goto out;
-    ret = iterate_inode_ref(sctx, sctx->parent_root, sctx->right_path,
+    ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
                             sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
     if (ret < 0)
         goto out;

@@ -3266,8 +3260,7 @@ static int process_all_refs(struct send_ctx *sctx,
              found_key.type != BTRFS_INODE_EXTREF_KEY))
             break;

-        ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb,
-                                sctx);
+        ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
         btrfs_release_path(path);
         if (ret < 0)
             goto out;

@@ -3335,7 +3328,7 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
     struct fs_path *p;
     posix_acl_xattr_header dummy_acl;

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -3362,7 +3355,7 @@ static int __process_new_xattr(int num, struct btrfs_key *di_key,
     ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -3375,7 +3368,7 @@ static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
     struct send_ctx *sctx = ctx;
     struct fs_path *p;

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -3386,7 +3379,7 @@ static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
     ret = send_remove_xattr(sctx, p, name, name_len);

 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -3394,8 +3387,8 @@ static int process_new_xattr(struct send_ctx *sctx)
 {
     int ret = 0;

-    ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path,
-                           sctx->cmp_key, __process_new_xattr, sctx);
+    ret = iterate_dir_item(sctx->send_root, sctx->left_path,
+                           sctx->cmp_key, __process_new_xattr, sctx);

     return ret;
 }

@@ -3404,8 +3397,8 @@ static int process_deleted_xattr(struct send_ctx *sctx)
 {
     int ret;

-    ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path,
-                           sctx->cmp_key, __process_deleted_xattr, sctx);
+    ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
+                           sctx->cmp_key, __process_deleted_xattr, sctx);

     return ret;
 }

@@ -3429,17 +3422,15 @@ static int __find_xattr(int num, struct btrfs_key *di_key,
         strncmp(name, ctx->name, name_len) == 0) {
         ctx->found_idx = num;
         ctx->found_data_len = data_len;
-        ctx->found_data = kmalloc(data_len, GFP_NOFS);
+        ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
         if (!ctx->found_data)
             return -ENOMEM;
-        memcpy(ctx->found_data, data, data_len);
         return 1;
     }
     return 0;
 }

-static int find_xattr(struct send_ctx *sctx,
-                      struct btrfs_root *root,
+static int find_xattr(struct btrfs_root *root,
                       struct btrfs_path *path,
                       struct btrfs_key *key,
                       const char *name, int name_len,

@@ -3454,7 +3445,7 @@ static int find_xattr(struct send_ctx *sctx,
     ctx.found_data = NULL;
     ctx.found_data_len = 0;

-    ret = iterate_dir_item(sctx, root, path, key, __find_xattr, &ctx);
+    ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
     if (ret < 0)
         return ret;

@@ -3480,9 +3471,9 @@ static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
     char *found_data = NULL;
     int found_data_len = 0;

-    ret = find_xattr(sctx, sctx->parent_root, sctx->right_path,
-                     sctx->cmp_key, name, name_len, &found_data,
-                     &found_data_len);
+    ret = find_xattr(sctx->parent_root, sctx->right_path,
+                     sctx->cmp_key, name, name_len, &found_data,
+                     &found_data_len);
     if (ret == -ENOENT) {
         ret = __process_new_xattr(num, di_key, name, name_len, data,
                                   data_len, type, ctx);

@@ -3508,8 +3499,8 @@ static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
     int ret;
     struct send_ctx *sctx = ctx;

-    ret = find_xattr(sctx, sctx->send_root, sctx->left_path, sctx->cmp_key,
-                     name, name_len, NULL, NULL);
+    ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
+                     name, name_len, NULL, NULL);
     if (ret == -ENOENT)
         ret = __process_deleted_xattr(num, di_key, name, name_len, data,
                                       data_len, type, ctx);

@@ -3523,11 +3514,11 @@ static int process_changed_xattr(struct send_ctx *sctx)
 {
     int ret = 0;

-    ret = iterate_dir_item(sctx, sctx->send_root, sctx->left_path,
+    ret = iterate_dir_item(sctx->send_root, sctx->left_path,
                            sctx->cmp_key, __process_changed_new_xattr, sctx);
     if (ret < 0)
         goto out;
-    ret = iterate_dir_item(sctx, sctx->parent_root, sctx->right_path,
+    ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
                            sctx->cmp_key, __process_changed_deleted_xattr, sctx);

 out:

@@ -3572,8 +3563,8 @@ static int process_all_new_xattrs(struct send_ctx *sctx)
             goto out;
         }

-        ret = iterate_dir_item(sctx, root, path, &found_key,
-                               __process_new_xattr, sctx);
+        ret = iterate_dir_item(root, path, &found_key,
+                               __process_new_xattr, sctx);
         if (ret < 0)
             goto out;

@@ -3598,7 +3589,7 @@ static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
     int num_read = 0;
     mm_segment_t old_fs;

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -3640,7 +3631,7 @@ verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);

 tlv_put_failure:
 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     set_fs(old_fs);
     if (ret < 0)
         return ret;

@@ -3663,7 +3654,7 @@ verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
                clone_root->root->objectid, clone_root->ino,
                clone_root->offset);

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -3686,8 +3677,7 @@ verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
             goto out;
         ret = get_cur_path(sctx, clone_root->ino, gen, p);
     } else {
-        ret = get_inode_path(sctx, clone_root->root,
-                             clone_root->ino, p);
+        ret = get_inode_path(clone_root->root, clone_root->ino, p);
     }
     if (ret < 0)
         goto out;

@@ -3704,7 +3694,7 @@ verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "

 tlv_put_failure:
 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -3717,7 +3707,7 @@ static int send_update_extent(struct send_ctx *sctx,
     int ret = 0;
     struct fs_path *p;

-    p = fs_path_alloc(sctx);
+    p = fs_path_alloc();
     if (!p)
         return -ENOMEM;

@@ -3737,7 +3727,7 @@ static int send_update_extent(struct send_ctx *sctx,

 tlv_put_failure:
 out:
-    fs_path_free(sctx, p);
+    fs_path_free(p);
     return ret;
 }

@@ -4579,6 +4569,41 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
     send_root = BTRFS_I(file_inode(mnt_file))->root;
     fs_info = send_root->fs_info;

+    /*
+     * This is done when we lookup the root, it should already be complete
+     * by the time we get here.
+     */
+    WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
+
+    /*
+     * If we just created this root we need to make sure that the orphan
+     * cleanup has been done and committed since we search the commit root,
+     * so check its commit root transid with our otransid and if they match
+     * commit the transaction to make sure everything is updated.
+     */
+    down_read(&send_root->fs_info->extent_commit_sem);
+    if (btrfs_header_generation(send_root->commit_root) ==
+        btrfs_root_otransid(&send_root->root_item)) {
+        struct btrfs_trans_handle *trans;
+
+        up_read(&send_root->fs_info->extent_commit_sem);
+
+        trans = btrfs_attach_transaction_barrier(send_root);
+        if (IS_ERR(trans)) {
+            if (PTR_ERR(trans) != -ENOENT) {
+                ret = PTR_ERR(trans);
+                goto out;
+            }
+            /* ENOENT means there's no transaction */
+        } else {
+            ret = btrfs_commit_transaction(trans, send_root);
+            if (ret)
+                goto out;
+        }
+    } else {
+        up_read(&send_root->fs_info->extent_commit_sem);
+    }
+
     arg = memdup_user(arg_, sizeof(*arg));
     if (IS_ERR(arg)) {
         ret = PTR_ERR(arg);

@@ -4663,10 +4688,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
         key.type = BTRFS_ROOT_ITEM_KEY;
         key.offset = (u64)-1;
         clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
-        if (!clone_root) {
-            ret = -EINVAL;
-            goto out;
-        }
         if (IS_ERR(clone_root)) {
             ret = PTR_ERR(clone_root);
             goto out;

@@ -4682,8 +4703,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
         key.type = BTRFS_ROOT_ITEM_KEY;
         key.offset = (u64)-1;

         sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
-        if (!sctx->parent_root) {
-            ret = -EINVAL;
+        if (IS_ERR(sctx->parent_root)) {
+            ret = PTR_ERR(sctx->parent_root);
             goto out;
         }
     }
||||
|
|
|
@ -51,7 +51,6 @@
|
|||
#include "print-tree.h"
|
||||
#include "xattr.h"
|
||||
#include "volumes.h"
|
||||
#include "version.h"
|
||||
#include "export.h"
|
||||
#include "compression.h"
|
||||
#include "rcu-string.h"
|
||||
|
@ -266,6 +265,9 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
|
|||
return;
|
||||
}
|
||||
ACCESS_ONCE(trans->transaction->aborted) = errno;
|
||||
/* Wake up anybody who may be waiting on this transaction */
|
||||
wake_up(&root->fs_info->transaction_wait);
|
||||
wake_up(&root->fs_info->transaction_blocked_wait);
|
||||
__btrfs_std_error(root->fs_info, function, line, errno, NULL);
|
||||
}
|
||||
/*
|
||||
|
@ -776,9 +778,6 @@ find_root:
|
|||
if (IS_ERR(new_root))
|
||||
return ERR_CAST(new_root);
|
||||
|
||||
if (btrfs_root_refs(&new_root->root_item) == 0)
|
||||
return ERR_PTR(-ENOENT);
|
||||
|
||||
dir_id = btrfs_root_dirid(&new_root->root_item);
|
||||
setup_root:
|
||||
location.objectid = dir_id;
|
||||
|
@ -866,7 +865,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
|
|||
return 0;
|
||||
}
|
||||
|
||||
btrfs_wait_ordered_extents(root, 1);
|
||||
btrfs_wait_all_ordered_extents(fs_info, 1);
|
||||
|
||||
trans = btrfs_attach_transaction_barrier(root);
|
||||
if (IS_ERR(trans)) {
|
||||
|
@ -1685,6 +1684,18 @@ static void btrfs_interface_exit(void)
|
|||
printk(KERN_INFO "btrfs: misc_deregister failed for control device\n");
|
||||
}
|
||||
|
||||
static void btrfs_print_info(void)
|
||||
{
|
||||
printk(KERN_INFO "Btrfs loaded"
|
||||
#ifdef CONFIG_BTRFS_DEBUG
|
||||
", debug=on"
|
||||
#endif
|
||||
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
|
||||
", integrity-checker=on"
|
||||
#endif
|
||||
"\n");
|
||||
}
|
||||
|
||||
static int __init init_btrfs_fs(void)
|
||||
{
|
||||
int err;
|
||||
|
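
/*
 * btrfs_print_info() relies on C string-literal concatenation: each #ifdef
 * simply contributes one more adjacent literal to a single format string.
 * A standalone sketch of the same pattern (the DEMO_* macros are
 * illustrative; compile with e.g. -DDEMO_DEBUG to extend the banner):
 */
#include <stdio.h>

static void print_info(void)
{
	printf("demo loaded"
#ifdef DEMO_DEBUG
	       ", debug=on"
#endif
#ifdef DEMO_INTEGRITY
	       ", integrity-checker=on"
#endif
	       "\n");
}

int main(void)
{
	print_info(); /* adjacent literals concatenate into one string */
	return 0;
}
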
@@ -1733,11 +1744,9 @@ static int __init init_btrfs_fs(void)

btrfs_init_lockdep();

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
btrfs_print_info();
btrfs_test_free_space_cache();
#endif

printk(KERN_INFO "%s loaded\n", BTRFS_BUILD_VERSION);
return 0;

unregister_ioctl:

@@ -34,12 +34,43 @@

#define BTRFS_ROOT_TRANS_TAG 0

static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_RUNNING] = 0U,
[TRANS_STATE_BLOCKED] = (__TRANS_USERSPACE |
__TRANS_START),
[TRANS_STATE_COMMIT_START] = (__TRANS_USERSPACE |
__TRANS_START |
__TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_USERSPACE |
__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN),
[TRANS_STATE_UNBLOCKED] = (__TRANS_USERSPACE |
__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
__TRANS_JOIN_NOLOCK),
[TRANS_STATE_COMPLETED] = (__TRANS_USERSPACE |
__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
__TRANS_JOIN_NOLOCK),
};

static void put_transaction(struct btrfs_transaction *transaction)
{
WARN_ON(atomic_read(&transaction->use_count) == 0);
if (atomic_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(transaction->delayed_refs.root.rb_node);
while (!list_empty(&transaction->pending_chunks)) {
struct extent_map *em;

em = list_first_entry(&transaction->pending_chunks,
struct extent_map, list);
list_del_init(&em->list);
free_extent_map(em);
}
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
}
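
/*
 * The btrfs_blocked_trans_types[] table above collapses the old
 * in_commit/blocked flag checks into one lookup: each transaction state
 * carries the set of join types it refuses. A userspace sketch of that
 * gating (shortened names, not the kernel definitions):
 */
#include <stdio.h>

enum state { RUNNING, BLOCKED, COMMIT_START, COMMIT_DOING, UNBLOCKED,
	     COMPLETED, STATE_MAX };

#define T_USERSPACE   (1U << 8)
#define T_START       (1U << 9)
#define T_ATTACH      (1U << 10)
#define T_JOIN        (1U << 11)
#define T_JOIN_NOLOCK (1U << 12)

/* blocked_types[s] is the set of join types refused while in state s */
static const unsigned int blocked_types[STATE_MAX] = {
	[RUNNING]      = 0U,
	[BLOCKED]      = T_USERSPACE | T_START,
	[COMMIT_START] = T_USERSPACE | T_START | T_ATTACH,
	[COMMIT_DOING] = T_USERSPACE | T_START | T_ATTACH | T_JOIN,
	[UNBLOCKED]    = T_USERSPACE | T_START | T_ATTACH | T_JOIN |
			 T_JOIN_NOLOCK,
	[COMPLETED]    = T_USERSPACE | T_START | T_ATTACH | T_JOIN |
			 T_JOIN_NOLOCK,
};

static int may_join(enum state s, unsigned int type)
{
	return !(blocked_types[s] & type); /* one mask test gates the join */
}

int main(void)
{
	printf("START while COMMIT_DOING: %d\n",
	       may_join(COMMIT_DOING, T_START));       /* 0: refused */
	printf("JOIN_NOLOCK while COMMIT_DOING: %d\n",
	       may_join(COMMIT_DOING, T_JOIN_NOLOCK)); /* 1: allowed */
	return 0;
}
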
@@ -50,18 +81,35 @@ static noinline void switch_commit_root(struct btrfs_root *root)
root->commit_root = btrfs_root_node(root);
}

static inline int can_join_transaction(struct btrfs_transaction *trans,
int type)
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
unsigned int type)
{
return !(trans->in_commit &&
type != TRANS_JOIN &&
type != TRANS_JOIN_NOLOCK);
if (type & TRANS_EXTWRITERS)
atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
unsigned int type)
{
if (type & TRANS_EXTWRITERS)
atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
unsigned int type)
{
atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
return atomic_read(&trans->num_extwriters);
}

/*
* either allocate a new transaction or hop into the existing one
*/
static noinline int join_transaction(struct btrfs_root *root, int type)
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
struct btrfs_transaction *cur_trans;
struct btrfs_fs_info *fs_info = root->fs_info;

@@ -74,32 +122,19 @@ loop:
return -EROFS;
}

if (fs_info->trans_no_join) {
/*
* If we are JOIN_NOLOCK we're already committing a current
* transaction, we just need a handle to deal with something
* when committing the transaction, such as inode cache and
* space cache. It is a special case.
*/
if (type != TRANS_JOIN_NOLOCK) {
spin_unlock(&fs_info->trans_lock);
return -EBUSY;
}
}

cur_trans = fs_info->running_transaction;
if (cur_trans) {
if (cur_trans->aborted) {
spin_unlock(&fs_info->trans_lock);
return cur_trans->aborted;
}
if (!can_join_transaction(cur_trans, type)) {
if (btrfs_blocked_trans_types[cur_trans->state] & type) {
spin_unlock(&fs_info->trans_lock);
return -EBUSY;
}
atomic_inc(&cur_trans->use_count);
atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
extwriter_counter_inc(cur_trans, type);
spin_unlock(&fs_info->trans_lock);
return 0;
}

@@ -112,6 +147,12 @@ loop:
if (type == TRANS_ATTACH)
return -ENOENT;

/*
* JOIN_NOLOCK only happens during the transaction commit, so
* it is impossible that ->running_transaction is NULL
*/
BUG_ON(type == TRANS_JOIN_NOLOCK);

cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
if (!cur_trans)
return -ENOMEM;

@@ -120,7 +161,7 @@ loop:
if (fs_info->running_transaction) {
/*
* someone started a transaction after we unlocked. Make sure
* to redo the trans_no_join checks above
* to redo the checks above
*/
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
goto loop;

@@ -131,17 +172,15 @@ loop:
}

atomic_set(&cur_trans->num_writers, 1);
cur_trans->num_joined = 0;
extwriter_counter_init(cur_trans, type);
init_waitqueue_head(&cur_trans->writer_wait);
init_waitqueue_head(&cur_trans->commit_wait);
cur_trans->in_commit = 0;
cur_trans->blocked = 0;
cur_trans->state = TRANS_STATE_RUNNING;
/*
* One for this trans handle, one so it will live on until we
* commit the transaction.
*/
atomic_set(&cur_trans->use_count, 2);
cur_trans->commit_done = 0;
cur_trans->start_time = get_seconds();

cur_trans->delayed_refs.root = RB_ROOT;

@@ -164,7 +203,6 @@ loop:
"creating a fresh transaction\n");
atomic64_set(&fs_info->tree_mod_seq, 0);

spin_lock_init(&cur_trans->commit_lock);
spin_lock_init(&cur_trans->delayed_refs.lock);
atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
atomic_set(&cur_trans->delayed_refs.ref_seq, 0);

@@ -172,6 +210,7 @@ loop:

INIT_LIST_HEAD(&cur_trans->pending_snapshots);
INIT_LIST_HEAD(&cur_trans->ordered_operations);
INIT_LIST_HEAD(&cur_trans->pending_chunks);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
fs_info->btree_inode->i_mapping);

@@ -269,6 +308,13 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
return (trans->state >= TRANS_STATE_BLOCKED &&
trans->state < TRANS_STATE_UNBLOCKED &&
!trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
* when this is done, it is safe to start a new transaction, but the current
* transaction might not be fully on disk.

@@ -279,12 +325,13 @@ static void wait_current_trans(struct btrfs_root *root)

spin_lock(&root->fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction;
if (cur_trans && cur_trans->blocked) {
if (cur_trans && is_transaction_blocked(cur_trans)) {
atomic_inc(&cur_trans->use_count);
spin_unlock(&root->fs_info->trans_lock);

wait_event(root->fs_info->transaction_wait,
!cur_trans->blocked);
cur_trans->state >= TRANS_STATE_UNBLOCKED ||
cur_trans->aborted);
put_transaction(cur_trans);
} else {
spin_unlock(&root->fs_info->trans_lock);

@@ -307,7 +354,7 @@ static int may_wait_transaction(struct btrfs_root *root, int type)
}

static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
enum btrfs_reserve_flush_enum flush)
{
struct btrfs_trans_handle *h;

@@ -320,7 +367,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,
return ERR_PTR(-EROFS);

if (current->journal_info) {
WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
WARN_ON(type & TRANS_EXTWRITERS);
h = current->journal_info;
h->use_count++;
WARN_ON(h->use_count > 2);

@@ -366,7 +413,7 @@ again:
* If we are ATTACH, it means we just want to catch the current
* transaction and commit it, so we needn't do sb_start_intwrite().
*/
if (type < TRANS_JOIN_NOLOCK)
if (type & __TRANS_FREEZABLE)
sb_start_intwrite(root->fs_info->sb);

if (may_wait_transaction(root, type))

@@ -408,7 +455,8 @@ again:
INIT_LIST_HEAD(&h->new_bgs);

smp_mb();
if (cur_trans->blocked && may_wait_transaction(root, type)) {
if (cur_trans->state >= TRANS_STATE_BLOCKED &&
may_wait_transaction(root, type)) {
btrfs_commit_transaction(h, root);
goto again;
}
@@ -429,7 +477,7 @@ got_it:
return h;

join_fail:
if (type < TRANS_JOIN_NOLOCK)
if (type & __TRANS_FREEZABLE)
sb_end_intwrite(root->fs_info->sb);
kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:

@@ -490,7 +538,7 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
}

/*
* btrfs_attach_transaction() - catch the running transaction
* btrfs_attach_transaction_barrier() - catch the running transaction
*
* It is similar to the above function, the differentia is this one
* will wait for all the inactive transactions until they fully

@@ -512,7 +560,7 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root)
static noinline void wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit)
{
wait_event(commit->commit_wait, commit->commit_done);
wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)

@@ -548,8 +596,8 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
spin_lock(&root->fs_info->trans_lock);
list_for_each_entry_reverse(t, &root->fs_info->trans_list,
list) {
if (t->in_commit) {
if (t->commit_done)
if (t->state >= TRANS_STATE_COMMIT_START) {
if (t->state == TRANS_STATE_COMPLETED)
break;
cur_trans = t;
atomic_inc(&cur_trans->use_count);

@@ -576,10 +624,11 @@ void btrfs_throttle(struct btrfs_root *root)
static int should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
int ret;
if (root->fs_info->global_block_rsv.space_info->full &&
btrfs_should_throttle_delayed_refs(trans, root))
return 1;

ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
return ret ? 1 : 0;
return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,

@@ -590,7 +639,8 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
int err;

smp_mb();
if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
if (cur_trans->state >= TRANS_STATE_BLOCKED ||
cur_trans->delayed_refs.flushing)
return 1;

updates = trans->delayed_ref_updates;

@@ -609,7 +659,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
{
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *info = root->fs_info;
int count = 0;
unsigned long cur = trans->delayed_ref_updates;
int lock = (trans->type != TRANS_JOIN_NOLOCK);
int err = 0;

@@ -638,17 +688,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);

while (count < 1) {
unsigned long cur = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (btrfs_should_throttle_delayed_refs(trans, root)) {
cur = max_t(unsigned long, cur, 1);
trans->delayed_ref_updates = 0;
if (cur &&
trans->transaction->delayed_refs.num_heads_ready > 64) {
trans->delayed_ref_updates = 0;
btrfs_run_delayed_refs(trans, root, cur);
} else {
break;
}
count++;
btrfs_run_delayed_refs(trans, root, cur);
}

btrfs_trans_release_metadata(trans, root);

@@ -658,12 +702,15 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_create_pending_block_groups(trans, root);

if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
should_end_transaction(trans, root)) {
trans->transaction->blocked = 1;
smp_wmb();
should_end_transaction(trans, root) &&
ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
spin_lock(&info->trans_lock);
if (cur_trans->state == TRANS_STATE_RUNNING)
cur_trans->state = TRANS_STATE_BLOCKED;
spin_unlock(&info->trans_lock);
}

if (lock && cur_trans->blocked && !cur_trans->in_commit) {
if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
if (throttle) {
/*
* We may race with somebody else here so end up having

@@ -677,12 +724,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
}
}

if (trans->type < TRANS_JOIN_NOLOCK)
if (trans->type & __TRANS_FREEZABLE)
sb_end_intwrite(root->fs_info->sb);

WARN_ON(cur_trans != info->running_transaction);
WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
atomic_dec(&cur_trans->num_writers);
extwriter_counter_dec(cur_trans, trans->type);

smp_mb();
if (waitqueue_active(&cur_trans->writer_wait))

@@ -736,9 +784,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
struct blk_plug plug;

blk_start_plug(&plug);
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
mark, &cached_state)) {
convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,

@@ -752,7 +798,6 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
}
if (err)
werr = err;
blk_finish_plug(&plug);
return werr;
}

@@ -797,8 +842,11 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
{
int ret;
int ret2;
struct blk_plug plug;

blk_start_plug(&plug);
ret = btrfs_write_marked_extents(root, dirty_pages, mark);
blk_finish_plug(&plug);
ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

if (ret)

@@ -1318,20 +1366,26 @@ static void update_super_roots(struct btrfs_root *root)

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
struct btrfs_transaction *trans;
int ret = 0;

spin_lock(&info->trans_lock);
if (info->running_transaction)
ret = info->running_transaction->in_commit;
trans = info->running_transaction;
if (trans)
ret = (trans->state >= TRANS_STATE_COMMIT_START);
spin_unlock(&info->trans_lock);
return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
struct btrfs_transaction *trans;
int ret = 0;

spin_lock(&info->trans_lock);
if (info->running_transaction)
ret = info->running_transaction->blocked;
trans = info->running_transaction;
if (trans)
ret = is_transaction_blocked(trans);
spin_unlock(&info->trans_lock);
return ret;
}

@@ -1343,7 +1397,9 @@ int btrfs_transaction_blocked(struct btrfs_fs_info *info)
static void wait_current_trans_commit_start(struct btrfs_root *root,
struct btrfs_transaction *trans)
{
wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
wait_event(root->fs_info->transaction_blocked_wait,
trans->state >= TRANS_STATE_COMMIT_START ||
trans->aborted);
}

/*

@@ -1354,7 +1410,8 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_transaction *trans)
{
wait_event(root->fs_info->transaction_wait,
trans->commit_done || (trans->in_commit && !trans->blocked));
trans->state >= TRANS_STATE_UNBLOCKED ||
trans->aborted);
}

/*
@@ -1450,26 +1507,31 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,

spin_lock(&root->fs_info->trans_lock);

if (list_empty(&cur_trans->list)) {
spin_unlock(&root->fs_info->trans_lock);
btrfs_end_transaction(trans, root);
return;
}
/*
* If the transaction is removed from the list, it means this
* transaction has been committed successfully, so it is impossible
* to call the cleanup function.
*/
BUG_ON(list_empty(&cur_trans->list));

list_del_init(&cur_trans->list);
if (cur_trans == root->fs_info->running_transaction) {
root->fs_info->trans_no_join = 1;
cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&root->fs_info->trans_lock);
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);

spin_lock(&root->fs_info->trans_lock);
root->fs_info->running_transaction = NULL;
}
spin_unlock(&root->fs_info->trans_lock);

btrfs_cleanup_one_transaction(trans->transaction, root);

spin_lock(&root->fs_info->trans_lock);
if (cur_trans == root->fs_info->running_transaction)
root->fs_info->running_transaction = NULL;
spin_unlock(&root->fs_info->trans_lock);

put_transaction(cur_trans);
put_transaction(cur_trans);

@@ -1481,33 +1543,13 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
current->journal_info = NULL;

kmem_cache_free(btrfs_trans_handle_cachep, trans);

spin_lock(&root->fs_info->trans_lock);
root->fs_info->trans_no_join = 0;
spin_unlock(&root->fs_info->trans_lock);
}

static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
int snap_pending = 0;
int ret;

if (!flush_on_commit) {
spin_lock(&root->fs_info->trans_lock);
if (!list_empty(&trans->transaction->pending_snapshots))
snap_pending = 1;
spin_unlock(&root->fs_info->trans_lock);
}

if (flush_on_commit || snap_pending) {
ret = btrfs_start_delalloc_inodes(root, 1);
if (ret)
return ret;
btrfs_wait_ordered_extents(root, 1);
}

ret = btrfs_run_delayed_items(trans, root);
if (ret)
return ret;

@@ -1531,23 +1573,25 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
return ret;
}

/*
* btrfs_transaction state sequence:
* in_commit = 0, blocked = 0 (initial)
* in_commit = 1, blocked = 1
* blocked = 0
* commit_done = 1
*/
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
return btrfs_start_all_delalloc_inodes(fs_info, 1);
return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
btrfs_wait_all_ordered_extents(fs_info, 1);
}

int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
unsigned long joined = 0;
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_transaction *prev_trans = NULL;
DEFINE_WAIT(wait);
int ret;
int should_grow = 0;
unsigned long now = get_seconds();

ret = btrfs_run_ordered_operations(trans, root, 0);
if (ret) {

@@ -1586,6 +1630,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
* start sending their work down.
*/
cur_trans->delayed_refs.flushing = 1;
smp_wmb();

if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);

@@ -1596,9 +1641,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return ret;
}

spin_lock(&cur_trans->commit_lock);
if (cur_trans->in_commit) {
spin_unlock(&cur_trans->commit_lock);
spin_lock(&root->fs_info->trans_lock);
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
spin_unlock(&root->fs_info->trans_lock);
atomic_inc(&cur_trans->use_count);
ret = btrfs_end_transaction(trans, root);

@@ -1609,16 +1654,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return ret;
}

trans->transaction->in_commit = 1;
trans->transaction->blocked = 1;
spin_unlock(&cur_trans->commit_lock);
cur_trans->state = TRANS_STATE_COMMIT_START;
wake_up(&root->fs_info->transaction_blocked_wait);

spin_lock(&root->fs_info->trans_lock);
if (cur_trans->list.prev != &root->fs_info->trans_list) {
prev_trans = list_entry(cur_trans->list.prev,
struct btrfs_transaction, list);
if (!prev_trans->commit_done) {
if (prev_trans->state != TRANS_STATE_COMPLETED) {
atomic_inc(&prev_trans->use_count);
spin_unlock(&root->fs_info->trans_lock);

@@ -1632,42 +1674,32 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
spin_unlock(&root->fs_info->trans_lock);
}

if (!btrfs_test_opt(root, SSD) &&
(now < cur_trans->start_time || now - cur_trans->start_time < 1))
should_grow = 1;
extwriter_counter_dec(cur_trans, trans->type);

do {
joined = cur_trans->num_joined;

WARN_ON(cur_trans != trans->transaction);

ret = btrfs_flush_all_pending_stuffs(trans, root);
if (ret)
goto cleanup_transaction;

prepare_to_wait(&cur_trans->writer_wait, &wait,
TASK_UNINTERRUPTIBLE);

if (atomic_read(&cur_trans->num_writers) > 1)
schedule_timeout(MAX_SCHEDULE_TIMEOUT);
else if (should_grow)
schedule_timeout(1);

finish_wait(&cur_trans->writer_wait, &wait);
} while (atomic_read(&cur_trans->num_writers) > 1 ||
(should_grow && cur_trans->num_joined != joined));
ret = btrfs_start_delalloc_flush(root->fs_info);
if (ret)
goto cleanup_transaction;

ret = btrfs_flush_all_pending_stuffs(trans, root);
if (ret)
goto cleanup_transaction;

wait_event(cur_trans->writer_wait,
extwriter_counter_read(cur_trans) == 0);

/* some pending stuffs might be added after the previous flush. */
ret = btrfs_flush_all_pending_stuffs(trans, root);
if (ret)
goto cleanup_transaction;

btrfs_wait_delalloc_flush(root->fs_info);
/*
* Ok now we need to make sure to block out any other joins while we
* commit the transaction. We could have started a join before setting
* no_join so make sure to wait for num_writers to == 1 again.
* COMMIT_DOING so make sure to wait for num_writers to == 1 again.
*/
spin_lock(&root->fs_info->trans_lock);
root->fs_info->trans_no_join = 1;
cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&root->fs_info->trans_lock);
wait_event(cur_trans->writer_wait,
atomic_read(&cur_trans->num_writers) == 1);

@@ -1794,10 +1826,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
sizeof(*root->fs_info->super_copy));

trans->transaction->blocked = 0;
spin_lock(&root->fs_info->trans_lock);
cur_trans->state = TRANS_STATE_UNBLOCKED;
root->fs_info->running_transaction = NULL;
root->fs_info->trans_no_join = 0;
spin_unlock(&root->fs_info->trans_lock);
mutex_unlock(&root->fs_info->reloc_mutex);

@@ -1825,10 +1856,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,

btrfs_finish_extent_commit(trans, root);

cur_trans->commit_done = 1;

root->fs_info->last_trans_committed = cur_trans->transid;

/*
* We needn't acquire the lock here because there is no other task
* which can change it.
*/
cur_trans->state = TRANS_STATE_COMPLETED;
wake_up(&cur_trans->commit_wait);

spin_lock(&root->fs_info->trans_lock);

@@ -1838,7 +1871,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
put_transaction(cur_trans);
put_transaction(cur_trans);

if (trans->type < TRANS_JOIN_NOLOCK)
if (trans->type & __TRANS_FREEZABLE)
sb_end_intwrite(root->fs_info->sb);

trace_btrfs_transaction_commit(root);

@@ -1885,11 +1918,6 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
int ret;
struct btrfs_fs_info *fs_info = root->fs_info;

if (fs_info->sb->s_flags & MS_RDONLY) {
pr_debug("btrfs: cleaner called for RO fs!\n");
return 0;
}

spin_lock(&fs_info->trans_lock);
if (list_empty(&fs_info->dead_roots)) {
spin_unlock(&fs_info->trans_lock);
@@ -22,8 +22,24 @@
#include "delayed-ref.h"
#include "ctree.h"

enum btrfs_trans_state {
TRANS_STATE_RUNNING = 0,
TRANS_STATE_BLOCKED = 1,
TRANS_STATE_COMMIT_START = 2,
TRANS_STATE_COMMIT_DOING = 3,
TRANS_STATE_UNBLOCKED = 4,
TRANS_STATE_COMPLETED = 5,
TRANS_STATE_MAX = 6,
};

struct btrfs_transaction {
u64 transid;
/*
* total external writers(USERSPACE/START/ATTACH) in this
* transaction, it must be zero before the transaction is
* being committed
*/
atomic_t num_extwriters;
/*
* total writers in this transaction, it must be zero before the
* transaction can end

@@ -31,12 +47,8 @@ struct btrfs_transaction {
atomic_t num_writers;
atomic_t use_count;

unsigned long num_joined;

spinlock_t commit_lock;
int in_commit;
int commit_done;
int blocked;
/* Be protected by fs_info->trans_lock when we want to change it. */
enum btrfs_trans_state state;
struct list_head list;
struct extent_io_tree dirty_pages;
unsigned long start_time;

@@ -44,17 +56,27 @@ struct btrfs_transaction {
wait_queue_head_t commit_wait;
struct list_head pending_snapshots;
struct list_head ordered_operations;
struct list_head pending_chunks;
struct btrfs_delayed_ref_root delayed_refs;
int aborted;
};

enum btrfs_trans_type {
TRANS_START,
TRANS_JOIN,
TRANS_USERSPACE,
TRANS_JOIN_NOLOCK,
TRANS_ATTACH,
};
#define __TRANS_FREEZABLE (1U << 0)

#define __TRANS_USERSPACE (1U << 8)
#define __TRANS_START (1U << 9)
#define __TRANS_ATTACH (1U << 10)
#define __TRANS_JOIN (1U << 11)
#define __TRANS_JOIN_NOLOCK (1U << 12)

#define TRANS_USERSPACE (__TRANS_USERSPACE | __TRANS_FREEZABLE)
#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH (__TRANS_ATTACH)
#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)

#define TRANS_EXTWRITERS (__TRANS_USERSPACE | __TRANS_START | \
__TRANS_ATTACH)

struct btrfs_trans_handle {
u64 transid;

@@ -70,7 +92,7 @@ struct btrfs_trans_handle {
short aborted;
short adding_csums;
bool allocating_chunk;
enum btrfs_trans_type type;
unsigned int type;
/*
* this root is only needed to validate that the root passed to
* start_transaction is the same as the one passed to end_transaction.
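
/*
 * The old enum relied on ordering tricks like "type < TRANS_JOIN_NOLOCK"
 * to decide whether a handle is freezable; the new definitions encode the
 * property as an explicit bit, so the test becomes a mask. An illustrative
 * sketch of that composition (abbreviated names, not the kernel macros):
 */
#include <stdio.h>

#define F_FREEZABLE   (1U << 0)
#define B_USERSPACE   (1U << 8)
#define B_START       (1U << 9)
#define B_ATTACH      (1U << 10)
#define B_JOIN        (1U << 11)
#define B_JOIN_NOLOCK (1U << 12)

/* public types compose a base bit with the properties they carry */
#define T_USERSPACE   (B_USERSPACE | F_FREEZABLE)
#define T_START       (B_START | F_FREEZABLE)
#define T_ATTACH      (B_ATTACH)
#define T_JOIN        (B_JOIN | F_FREEZABLE)
#define T_JOIN_NOLOCK (B_JOIN_NOLOCK)

int main(void)
{
	const struct { const char *name; unsigned int type; } types[] = {
		{ "USERSPACE",   T_USERSPACE },
		{ "START",       T_START },
		{ "ATTACH",      T_ATTACH },
		{ "JOIN",        T_JOIN },
		{ "JOIN_NOLOCK", T_JOIN_NOLOCK },
	};

	for (int i = 0; i < 5; i++) /* freezable => needs sb_start_intwrite() */
		printf("%-11s freezable=%d\n", types[i].name,
		       !!(types[i].type & F_FREEZABLE));
	return 0;
}
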
@@ -18,6 +18,7 @@

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "transaction.h"

@@ -279,11 +280,23 @@ static int process_one_buffer(struct btrfs_root *log,
{
int ret = 0;

/*
* If this fs is mixed then we need to be able to process the leaves to
* pin down any logged extents, so we have to read the block.
*/
if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
ret = btrfs_read_buffer(eb, gen);
if (ret)
return ret;
}

if (wc->pin)
ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
eb->start, eb->len);

if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
if (wc->pin && btrfs_header_level(eb) == 0)
ret = btrfs_exclude_logged_extents(log, eb);
if (wc->write)
btrfs_write_tree_block(eb);
if (wc->wait)

@@ -2016,13 +2029,8 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
eb, i, &key);
if (ret)
break;
} else if (key.type == BTRFS_INODE_REF_KEY) {
ret = add_inode_ref(wc->trans, root, log, path,
eb, i, &key);
if (ret && ret != -ENOENT)
break;
ret = 0;
} else if (key.type == BTRFS_INODE_EXTREF_KEY) {
} else if (key.type == BTRFS_INODE_REF_KEY ||
key.type == BTRFS_INODE_EXTREF_KEY) {
ret = add_inode_ref(wc->trans, root, log, path,
eb, i, &key);
if (ret && ret != -ENOENT)

@@ -2358,6 +2366,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
unsigned long log_transid = 0;
struct blk_plug plug;

mutex_lock(&root->log_mutex);
log_transid = root->log_transid;

@@ -2401,8 +2410,10 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
/* we start IO on all the marked extents here, but we don't actually
* wait for them until later.
*/
blk_start_plug(&plug);
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, root, ret);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);

@@ -2437,6 +2448,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}

if (ret) {
blk_finish_plug(&plug);
if (ret != -ENOSPC) {
btrfs_abort_transaction(trans, root, ret);
mutex_unlock(&log_root_tree->log_mutex);

@@ -2452,6 +2464,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,

index2 = log_root_tree->log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
wait_log_commit(trans, log_root_tree,
log_root_tree->log_transid);

@@ -2474,6 +2487,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* check the full commit flag again
*/
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
blk_finish_plug(&plug);
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);

@@ -2481,9 +2495,10 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out_wake_log_root;
}

ret = btrfs_write_and_wait_marked_extents(log_root_tree,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
ret = btrfs_write_marked_extents(log_root_tree,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
blk_finish_plug(&plug);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
btrfs_free_logged_extents(log, log_transid);

@@ -2491,6 +2506,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out_wake_log_root;
}
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
btrfs_wait_marked_extents(log_root_tree,
&log_root_tree->dirty_log_pages,
EXTENT_NEW | EXTENT_DIRTY);
btrfs_wait_logged_extents(log, log_transid);

btrfs_set_super_log_root(root->fs_info->super_for_commit,

@@ -4016,8 +4034,7 @@ again:
if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
break;

log = btrfs_read_fs_root_no_radix(log_root_tree,
&found_key);
log = btrfs_read_fs_root(log_root_tree, &found_key);
if (IS_ERR(log)) {
ret = PTR_ERR(log);
btrfs_error(fs_info, ret,
@@ -205,6 +205,10 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
u64 new_alloced = ulist->nodes_alloced + 128;
struct ulist_node *new_nodes;
void *old = NULL;
int i;

for (i = 0; i < ulist->nnodes; i++)
rb_erase(&ulist->nodes[i].rb_node, &ulist->root);

/*
* if nodes_alloced == ULIST_SIZE no memory has been allocated

@@ -224,6 +228,17 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,

ulist->nodes = new_nodes;
ulist->nodes_alloced = new_alloced;

/*
* krealloc actually uses memcpy, which does not copy rb_node
* pointers, so we have to do it ourselves. Otherwise we may
* be bitten by crashes.
*/
for (i = 0; i < ulist->nnodes; i++) {
ret = ulist_rbtree_insert(ulist, &ulist->nodes[i]);
if (ret < 0)
return ret;
}
}
ulist->nodes[ulist->nnodes].val = val;
ulist->nodes[ulist->nnodes].aux = aux;
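
/*
 * Why the re-insertion loop above is needed: krealloc may move the array,
 * and its memcpy preserves the bytes of the embedded rb_node pointers,
 * which then still reference the old buffer. A userspace sketch of the
 * same hazard with a self-referencing array (toy types, not the ulist):
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next; /* intrusive link into the same array */
};

int main(void)
{
	struct node *arr = calloc(2, sizeof(*arr));
	if (!arr)
		return 1;
	arr[0].val = 1;
	arr[1].val = 2;
	arr[0].next = &arr[1];

	uintptr_t old_elem1 = (uintptr_t)&arr[1];
	struct node *grown = realloc(arr, 1024 * sizeof(*arr));
	if (!grown) {
		free(arr);
		return 1;
	}
	arr = grown;

	if ((uintptr_t)&arr[1] != old_elem1) {
		/* buffer moved: every intrusive link must be rebuilt,
		 * exactly like the rb-tree re-insertion in the patch */
		arr[0].next = &arr[1];
	}
	printf("link target val = %d\n", arr[0].next->val);
	free(arr);
	return 0;
}
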
@@ -1,4 +0,0 @@
#ifndef __BTRFS_VERSION_H
#define __BTRFS_VERSION_H
#define BTRFS_BUILD_VERSION "Btrfs"
#endif

@@ -982,6 +982,35 @@ out:
return ret;
}

static int contains_pending_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device,
u64 *start, u64 len)
{
struct extent_map *em;
int ret = 0;

list_for_each_entry(em, &trans->transaction->pending_chunks, list) {
struct map_lookup *map;
int i;

map = (struct map_lookup *)em->bdev;
for (i = 0; i < map->num_stripes; i++) {
if (map->stripes[i].dev != device)
continue;
if (map->stripes[i].physical >= *start + len ||
map->stripes[i].physical + em->orig_block_len <=
*start)
continue;
*start = map->stripes[i].physical +
em->orig_block_len;
ret = 1;
}
}

return ret;
}
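
/*
 * The scan above in miniature: pending chunks are allocations that exist
 * only in memory until commit, so the dev-extent search must not hand out
 * ranges that overlap them. A simplified model over plain ranges:
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct pending { uint64_t start, len; };

static int contains_pending(const struct pending *p, size_t n,
			    uint64_t *start, uint64_t len)
{
	int hit = 0;

	for (size_t i = 0; i < n; i++) {
		if (p[i].start >= *start + len ||
		    p[i].start + p[i].len <= *start)
			continue;
		/* overlap: push the candidate past the pending range */
		*start = p[i].start + p[i].len;
		hit = 1;
	}
	return hit;
}

int main(void)
{
	struct pending pend[] = { { 1000, 500 } };
	uint64_t start = 900;

	while (contains_pending(pend, 1, &start, 200))
		; /* retry until the hole is clean, as the caller does */
	printf("first usable offset: %llu\n", (unsigned long long)start);
	return 0;
}
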
/*
* find_free_dev_extent - find free space in the specified device
* @device: the device which we search the free space in

@@ -1002,7 +1031,8 @@ out:
* But if we don't find suitable free space, it is used to store the size of
* the max free space.
*/
int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *len)
{
struct btrfs_key key;

@@ -1026,21 +1056,22 @@ int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
*/
search_start = max(root->fs_info->alloc_start, 1024ull * 1024);

path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
again:
max_hole_start = search_start;
max_hole_size = 0;
hole_size = 0;

if (search_start >= search_end || device->is_tgtdev_for_dev_replace) {
ret = -ENOSPC;
goto error;
goto out;
}

path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto error;
}
path->reada = 2;
path->search_commit_root = 1;
path->skip_locking = 1;

key.objectid = device->devid;
key.offset = search_start;

@@ -1081,6 +1112,15 @@ int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
if (key.offset > search_start) {
hole_size = key.offset - search_start;

/*
* Have to check before we set max_hole_start, otherwise
* we could end up sending back this offset anyway.
*/
if (contains_pending_extent(trans, device,
&search_start,
hole_size))
hole_size = 0;

if (hole_size > max_hole_size) {
max_hole_start = search_start;
max_hole_size = hole_size;

@@ -1124,6 +1164,11 @@ next:
max_hole_size = hole_size;
}

if (contains_pending_extent(trans, device, &search_start, hole_size)) {
btrfs_release_path(path);
goto again;
}

/* See above. */
if (hole_size < num_bytes)
ret = -ENOSPC;

@@ -1132,7 +1177,6 @@ next:

out:
btrfs_free_path(path);
error:
*start = max_hole_start;
if (len)
*len = max_hole_size;

@@ -1244,47 +1288,22 @@ out:
return ret;
}

static noinline int find_next_chunk(struct btrfs_root *root,
u64 objectid, u64 *offset)
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
struct btrfs_path *path;
int ret;
struct btrfs_key key;
struct btrfs_chunk *chunk;
struct btrfs_key found_key;
struct extent_map_tree *em_tree;
struct extent_map *em;
struct rb_node *n;
u64 ret = 0;

path = btrfs_alloc_path();
if (!path)
return -ENOMEM;

key.objectid = objectid;
key.offset = (u64)-1;
key.type = BTRFS_CHUNK_ITEM_KEY;

ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto error;

BUG_ON(ret == 0); /* Corruption */

ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
if (ret) {
*offset = 0;
} else {
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
path->slots[0]);
if (found_key.objectid != objectid)
*offset = 0;
else {
chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_chunk);
*offset = found_key.offset +
btrfs_chunk_length(path->nodes[0], chunk);
}
em_tree = &fs_info->mapping_tree.map_tree;
read_lock(&em_tree->lock);
n = rb_last(&em_tree->map);
if (n) {
em = rb_entry(n, struct extent_map, rb_node);
ret = em->start + em->len;
}
ret = 0;
error:
btrfs_free_path(path);
read_unlock(&em_tree->lock);

return ret;
}
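
/*
 * The rewritten find_next_chunk() above no longer searches the on-disk
 * chunk tree; the next offset is simply "end of the last in-memory
 * mapping" (rb_last of the mapping tree). With a sorted array standing
 * in for the rb-tree, the idea reduces to:
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct mapping { uint64_t start, len; }; /* kept sorted by start */

static uint64_t next_chunk_offset(const struct mapping *m, size_t n)
{
	if (n == 0)
		return 0;
	return m[n - 1].start + m[n - 1].len; /* the "rb_last" equivalent */
}

int main(void)
{
	struct mapping maps[] = { { 0, 4096 }, { 4096, 8192 } };

	printf("next chunk starts at %llu\n",
	       (unsigned long long)next_chunk_offset(maps, 2)); /* 12288 */
	return 0;
}
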
@@ -1462,31 +1481,23 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
btrfs_dev_replace_unlock(&root->fs_info->dev_replace);

if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) {
printk(KERN_ERR "btrfs: unable to go below four devices "
"on raid10\n");
ret = -EINVAL;
ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET;
goto out;
}

if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) {
printk(KERN_ERR "btrfs: unable to go below two "
"devices on raid1\n");
ret = -EINVAL;
ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET;
goto out;
}

if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
root->fs_info->fs_devices->rw_devices <= 2) {
printk(KERN_ERR "btrfs: unable to go below two "
"devices on raid5\n");
ret = -EINVAL;
ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET;
goto out;
}
if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
root->fs_info->fs_devices->rw_devices <= 3) {
printk(KERN_ERR "btrfs: unable to go below three "
"devices on raid6\n");
ret = -EINVAL;
ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET;
goto out;
}
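
/*
 * The hunk above swaps bare -EINVAL for distinct, user-reportable codes.
 * A sketch of the per-profile minimum-device rule it enforces (the DEMO_*
 * codes and profile bits are stand-ins for the BTRFS_ERROR_DEV_* values
 * and block group flags):
 */
#include <stdio.h>
#include <stddef.h>

enum demo_err {
	DEMO_OK = 0,
	DEMO_ERR_RAID10_MIN_NOT_MET,
	DEMO_ERR_RAID1_MIN_NOT_MET,
	DEMO_ERR_RAID5_MIN_NOT_MET,
	DEMO_ERR_RAID6_MIN_NOT_MET,
};

#define P_RAID10 (1U << 0)
#define P_RAID1  (1U << 1)
#define P_RAID5  (1U << 2)
#define P_RAID6  (1U << 3)

/* refuse removal when a profile is in use and num_devices <= threshold */
static const struct {
	unsigned int profile;
	int threshold;
	enum demo_err err;
} rules[] = {
	{ P_RAID10, 4, DEMO_ERR_RAID10_MIN_NOT_MET },
	{ P_RAID1,  2, DEMO_ERR_RAID1_MIN_NOT_MET },
	{ P_RAID5,  2, DEMO_ERR_RAID5_MIN_NOT_MET },
	{ P_RAID6,  3, DEMO_ERR_RAID6_MIN_NOT_MET },
};

static enum demo_err may_remove_device(unsigned int profiles, int num_devices)
{
	for (size_t i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
		if ((profiles & rules[i].profile) &&
		    num_devices <= rules[i].threshold)
			return rules[i].err;
	return DEMO_OK;
}

int main(void)
{
	printf("raid10 with 4 devices -> %d\n", may_remove_device(P_RAID10, 4));
	printf("raid1 with 3 devices  -> %d\n", may_remove_device(P_RAID1, 3));
	return 0;
}
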
@@ -1512,8 +1523,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
bh = NULL;
disk_super = NULL;
if (!device) {
printk(KERN_ERR "btrfs: no missing devices found to "
"remove\n");
ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
goto out;
}
} else {

@@ -1535,15 +1545,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
}

if (device->is_tgtdev_for_dev_replace) {
pr_err("btrfs: unable to remove the dev_replace target dev\n");
ret = -EINVAL;
ret = BTRFS_ERROR_DEV_TGT_REPLACE;
goto error_brelse;
}

if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
printk(KERN_ERR "btrfs: unable to remove the only writeable "
"device\n");
ret = -EINVAL;
ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
goto error_brelse;
}

@@ -3295,10 +3302,7 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
}

tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
if (IS_ERR(tsk))
return PTR_ERR(tsk);

return 0;
return PTR_RET(tsk);
}

int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
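
/*
 * PTR_RET() collapses "IS_ERR ? PTR_ERR : 0" into one call, which is all
 * the hunk above changes. A userspace toy of the kernel's ERR_PTR
 * convention, where errors live in the top MAX_ERRNO pointer values:
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)       { return (void *)err; }
static long ptr_err(const void *ptr) { return (long)ptr; }
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static int ptr_ret(const void *ptr)
{
	return is_err(ptr) ? (int)ptr_err(ptr) : 0;
}

int main(void)
{
	int value = 42;
	void *ok  = &value;
	void *bad = err_ptr(-ENOMEM);

	printf("ok  -> %d\n", ptr_ret(ok));  /* 0 */
	printf("bad -> %d\n", ptr_ret(bad)); /* -12 */
	return 0;
}
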
@@ -3681,10 +3685,8 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
}

static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct map_lookup **map_ret,
u64 *num_bytes_out, u64 *stripe_size_out,
u64 start, u64 type)
struct btrfs_root *extent_root, u64 start,
u64 type)
{
struct btrfs_fs_info *info = extent_root->fs_info;
struct btrfs_fs_devices *fs_devices = info->fs_devices;

@@ -3791,7 +3793,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (total_avail == 0)
continue;

ret = find_free_dev_extent(device,
ret = find_free_dev_extent(trans, device,
max_stripe_size * dev_stripes,
&dev_offset, &max_avail);
if (ret && ret != -ENOSPC)

@@ -3903,12 +3905,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
map->type = type;
map->sub_stripes = sub_stripes;

*map_ret = map;
num_bytes = stripe_size * data_stripes;

*stripe_size_out = stripe_size;
*num_bytes_out = num_bytes;

trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);

em = alloc_extent_map();

@@ -3921,38 +3919,26 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
em->len = num_bytes;
em->block_start = 0;
em->block_len = em->len;
em->orig_block_len = stripe_size;

em_tree = &extent_root->fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em, 0);
if (!ret) {
list_add_tail(&em->list, &trans->transaction->pending_chunks);
atomic_inc(&em->refs);
}
write_unlock(&em_tree->lock);
if (ret) {
free_extent_map(em);
goto error;
}

for (i = 0; i < map->num_stripes; ++i) {
struct btrfs_device *device;
u64 dev_offset;

device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;

ret = btrfs_alloc_dev_extent(trans, device,
info->chunk_root->root_key.objectid,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, dev_offset, stripe_size);
if (ret)
goto error_dev_extent;
}

ret = btrfs_make_block_group(trans, extent_root, 0, type,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, num_bytes);
if (ret) {
i = map->num_stripes - 1;
goto error_dev_extent;
}
if (ret)
goto error_del_extent;

free_extent_map(em);
check_raid56_incompat_flag(extent_root->fs_info, type);

@@ -3960,18 +3946,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
kfree(devices_info);
return 0;

error_dev_extent:
for (; i >= 0; i--) {
struct btrfs_device *device;
int err;

device = map->stripes[i].dev;
err = btrfs_free_dev_extent(trans, device, start);
if (err) {
btrfs_abort_transaction(trans, extent_root, err);
break;
}
}
error_del_extent:
write_lock(&em_tree->lock);
remove_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
@@ -3986,33 +3961,68 @@ error:
return ret;
}

static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct map_lookup *map, u64 chunk_offset,
u64 chunk_size, u64 stripe_size)
u64 chunk_offset, u64 chunk_size)
{
u64 dev_offset;
struct btrfs_key key;
struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
struct btrfs_device *device;
struct btrfs_chunk *chunk;
struct btrfs_stripe *stripe;
size_t item_size = btrfs_chunk_item_size(map->num_stripes);
int index = 0;
struct extent_map_tree *em_tree;
struct extent_map *em;
struct map_lookup *map;
size_t item_size;
u64 dev_offset;
u64 stripe_size;
int i = 0;
int ret;

chunk = kzalloc(item_size, GFP_NOFS);
if (!chunk)
return -ENOMEM;
em_tree = &extent_root->fs_info->mapping_tree.map_tree;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, chunk_offset, chunk_size);
read_unlock(&em_tree->lock);

if (!em) {
btrfs_crit(extent_root->fs_info, "unable to find logical "
"%Lu len %Lu", chunk_offset, chunk_size);
return -EINVAL;
}

if (em->start != chunk_offset || em->len != chunk_size) {
btrfs_crit(extent_root->fs_info, "found a bad mapping, wanted"
" %Lu-%Lu, found %Lu-%Lu\n", chunk_offset,
chunk_size, em->start, em->len);
free_extent_map(em);
return -EINVAL;
}

map = (struct map_lookup *)em->bdev;
item_size = btrfs_chunk_item_size(map->num_stripes);
stripe_size = em->orig_block_len;

chunk = kzalloc(item_size, GFP_NOFS);
if (!chunk) {
ret = -ENOMEM;
goto out;
}

for (i = 0; i < map->num_stripes; i++) {
device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;

index = 0;
while (index < map->num_stripes) {
device = map->stripes[index].dev;
device->bytes_used += stripe_size;
ret = btrfs_update_device(trans, device);
if (ret)
goto out_free;
index++;
goto out;
ret = btrfs_alloc_dev_extent(trans, device,
chunk_root->root_key.objectid,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
chunk_offset, dev_offset,
stripe_size);
if (ret)
goto out;
}

spin_lock(&extent_root->fs_info->free_chunk_lock);

@@ -4020,17 +4030,15 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
map->num_stripes);
spin_unlock(&extent_root->fs_info->free_chunk_lock);

index = 0;
stripe = &chunk->stripe;
while (index < map->num_stripes) {
device = map->stripes[index].dev;
dev_offset = map->stripes[index].physical;
for (i = 0; i < map->num_stripes; i++) {
device = map->stripes[i].dev;
dev_offset = map->stripes[i].physical;

btrfs_set_stack_stripe_devid(stripe, device->devid);
btrfs_set_stack_stripe_offset(stripe, dev_offset);
memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
stripe++;
index++;
}

btrfs_set_stack_chunk_length(chunk, chunk_size);

@@ -4048,7 +4056,6 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
key.offset = chunk_offset;

ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);

if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
/*
* TODO: Cleanup of inserted chunk root in case of

@@ -4058,8 +4065,9 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
item_size);
}

out_free:
out:
kfree(chunk);
free_extent_map(em);
return ret;
}

@@ -4074,27 +4082,9 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 type)
{
u64 chunk_offset;
u64 chunk_size;
u64 stripe_size;
struct map_lookup *map;
struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
int ret;

ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
&chunk_offset);
if (ret)
return ret;

ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
&stripe_size, chunk_offset, type);
if (ret)
return ret;

ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
chunk_size, stripe_size);
if (ret)
return ret;
return 0;
chunk_offset = find_next_chunk(extent_root->fs_info);
return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
}

static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
@@ -4103,66 +4093,31 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
{
u64 chunk_offset;
u64 sys_chunk_offset;
u64 chunk_size;
u64 sys_chunk_size;
u64 stripe_size;
u64 sys_stripe_size;
u64 alloc_profile;
struct map_lookup *map;
struct map_lookup *sys_map;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *extent_root = fs_info->extent_root;
int ret;

ret = find_next_chunk(fs_info->chunk_root,
BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
if (ret)
return ret;

chunk_offset = find_next_chunk(fs_info);
alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
&stripe_size, chunk_offset, alloc_profile);
ret = __btrfs_alloc_chunk(trans, extent_root, chunk_offset,
alloc_profile);
if (ret)
return ret;

sys_chunk_offset = chunk_offset + chunk_size;

sys_chunk_offset = find_next_chunk(root->fs_info);
alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
&sys_chunk_size, &sys_stripe_size,
sys_chunk_offset, alloc_profile);
ret = __btrfs_alloc_chunk(trans, extent_root, sys_chunk_offset,
alloc_profile);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}

ret = btrfs_add_device(trans, fs_info->chunk_root, device);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}

/*
* Modifying chunk tree needs allocating new blocks from both
* system block group and metadata block group. So we only can
* do operations require modifying the chunk tree after both
* block groups were created.
*/
ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
chunk_size, stripe_size);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}

ret = __finish_chunk_alloc(trans, extent_root, sys_map,
sys_chunk_offset, sys_chunk_size,
sys_stripe_size);
if (ret)
btrfs_abort_transaction(trans, root, ret);

out:

return ret;
}

@@ -4435,9 +4390,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
map = (struct map_lookup *)em->bdev;
offset = logical - em->start;

if (mirror_num > map->num_stripes)
mirror_num = 0;

stripe_len = map->stripe_len;
stripe_nr = offset;
/*

@@ -5367,7 +5319,6 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
return NULL;
list_add(&device->dev_list,
&fs_devices->devices);
device->dev_root = root->fs_info->dev_root;
device->devid = devid;
device->work.func = pending_bios_fn;
device->fs_devices = fs_devices;

@@ -5593,7 +5544,6 @@ static int read_one_dev(struct btrfs_root *root,
}

fill_device_from_item(leaf, dev_item, device);
device->dev_root = root->fs_info->dev_root;
device->in_fs_metadata = 1;
if (device->writeable && !device->is_tgtdev_for_dev_replace) {
device->fs_devices->total_rw_bytes += device->total_bytes;

@@ -5751,6 +5701,17 @@ error:
return ret;
}

void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;

mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry(device, &fs_devices->devices, dev_list)
device->dev_root = fs_info->dev_root;
mutex_unlock(&fs_devices->device_list_mutex);
}

static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
int i;
@@ -316,11 +316,13 @@ int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
 int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
-int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+			 struct btrfs_device *device, u64 num_bytes,
 			 u64 *start, u64 *max_avail);
 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
 int btrfs_get_dev_stats(struct btrfs_root *root,
 			struct btrfs_ioctl_get_dev_stats *stats);
+void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
 			struct btrfs_fs_info *fs_info);
@@ -336,6 +338,9 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
 unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
 				    struct btrfs_mapping_tree *map_tree,
 				    u64 logical);
+int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
+			     struct btrfs_root *extent_root,
+			     u64 chunk_offset, u64 chunk_size);
 static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
 				      int index)
 {
@@ -40,22 +40,25 @@ struct extent_buffer;
 	{ BTRFS_ROOT_TREE_DIR_OBJECTID, "ROOT_TREE_DIR" }, \
 	{ BTRFS_CSUM_TREE_OBJECTID, "CSUM_TREE" }, \
 	{ BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
+	{ BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \
 	{ BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
 	{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
 
 #define show_root_type(obj) \
 	obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) || \
 	      (obj >= BTRFS_ROOT_TREE_OBJECTID && \
-	       obj <= BTRFS_CSUM_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
+	       obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
 
 #define BTRFS_GROUP_FLAGS \
 	{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
 	{ BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
 	{ BTRFS_BLOCK_GROUP_METADATA, "METADATA"}, \
 	{ BTRFS_BLOCK_GROUP_RAID0, "RAID0"}, \
 	{ BTRFS_BLOCK_GROUP_RAID1, "RAID1"}, \
 	{ BTRFS_BLOCK_GROUP_DUP, "DUP"}, \
-	{ BTRFS_BLOCK_GROUP_RAID10, "RAID10"}
+	{ BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \
+	{ BTRFS_BLOCK_GROUP_RAID5, "RAID5"}, \
+	{ BTRFS_BLOCK_GROUP_RAID6, "RAID6"}
 
 #define BTRFS_UUID_SIZE 16
 
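Note: these macros feed __print_flags()/__print_symbolic(), which render a raw flags word as a |-separated name list in the trace output; the tracepoint hunks in this file merely teach them about the quota tree and the RAID5/RAID6 block groups covered by this cycle. The same decoding can be done in plain C. The bit values below are copied from fs/btrfs/ctree.h as of this merge, so treat them as a snapshot for illustration rather than a stable ABI.

/* Decode a btrfs block-group flags word the way the tracepoints'
 * __print_flags(BTRFS_GROUP_FLAGS) does. */
#include <stdio.h>

#define BLOCK_GROUP_DATA	(1ULL << 0)
#define BLOCK_GROUP_SYSTEM	(1ULL << 1)
#define BLOCK_GROUP_METADATA	(1ULL << 2)
#define BLOCK_GROUP_RAID0	(1ULL << 3)
#define BLOCK_GROUP_RAID1	(1ULL << 4)
#define BLOCK_GROUP_DUP		(1ULL << 5)
#define BLOCK_GROUP_RAID10	(1ULL << 6)
#define BLOCK_GROUP_RAID5	(1ULL << 7)
#define BLOCK_GROUP_RAID6	(1ULL << 8)

static const struct { unsigned long long bit; const char *name; } flags[] = {
	{ BLOCK_GROUP_DATA,	"DATA" },
	{ BLOCK_GROUP_SYSTEM,	"SYSTEM" },
	{ BLOCK_GROUP_METADATA,	"METADATA" },
	{ BLOCK_GROUP_RAID0,	"RAID0" },
	{ BLOCK_GROUP_RAID1,	"RAID1" },
	{ BLOCK_GROUP_DUP,	"DUP" },
	{ BLOCK_GROUP_RAID10,	"RAID10" },
	{ BLOCK_GROUP_RAID5,	"RAID5" },
	{ BLOCK_GROUP_RAID6,	"RAID6" },
};

int main(void)
{
	unsigned long long v = BLOCK_GROUP_DATA | BLOCK_GROUP_RAID5;
	const char *sep = "";
	unsigned int i;

	for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
		if (v & flags[i].bit) {
			printf("%s%s", sep, flags[i].name);
			sep = "|";
		}
	}
	putchar('\n');		/* prints: DATA|RAID5 */
	return 0;
}

Running it prints DATA|RAID5, which is the string a btrfs chunk tracepoint would show for such a block group.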
@@ -154,7 +157,9 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 	{ EXTENT_FLAG_PINNED, "PINNED" }, \
 	{ EXTENT_FLAG_COMPRESSED, "COMPRESSED" }, \
 	{ EXTENT_FLAG_VACANCY, "VACANCY" }, \
-	{ EXTENT_FLAG_PREALLOC, "PREALLOC" })
+	{ EXTENT_FLAG_PREALLOC, "PREALLOC" }, \
+	{ EXTENT_FLAG_LOGGING, "LOGGING" }, \
+	{ EXTENT_FLAG_FILLING, "FILLING" })
 
 TRACE_EVENT(btrfs_get_extent,
 
@@ -201,13 +206,17 @@ TRACE_EVENT(btrfs_get_extent,
 );
 
 #define show_ordered_flags(flags) \
 	__print_symbolic(flags, \
 		{ BTRFS_ORDERED_IO_DONE, "IO_DONE" }, \
 		{ BTRFS_ORDERED_COMPLETE, "COMPLETE" }, \
 		{ BTRFS_ORDERED_NOCOW, "NOCOW" }, \
 		{ BTRFS_ORDERED_COMPRESSED, "COMPRESSED" }, \
 		{ BTRFS_ORDERED_PREALLOC, "PREALLOC" }, \
-		{ BTRFS_ORDERED_DIRECT, "DIRECT" })
+		{ BTRFS_ORDERED_DIRECT, "DIRECT" }, \
+		{ BTRFS_ORDERED_IOERR, "IOERR" }, \
+		{ BTRFS_ORDERED_UPDATED_ISIZE, "UPDATED_ISIZE" }, \
+		{ BTRFS_ORDERED_LOGGED_CSUM, "LOGGED_CSUM" })
+
 
 DECLARE_EVENT_CLASS(btrfs__ordered_extent,
 
@@ -555,7 +564,9 @@ TRACE_EVENT(btrfs_delayed_ref_head,
 	{ BTRFS_BLOCK_GROUP_RAID0, "RAID0" }, \
 	{ BTRFS_BLOCK_GROUP_RAID1, "RAID1" }, \
 	{ BTRFS_BLOCK_GROUP_DUP, "DUP" }, \
-	{ BTRFS_BLOCK_GROUP_RAID10, "RAID10"})
+	{ BTRFS_BLOCK_GROUP_RAID10, "RAID10"}, \
+	{ BTRFS_BLOCK_GROUP_RAID5, "RAID5" }, \
+	{ BTRFS_BLOCK_GROUP_RAID6, "RAID6" })
 
 DECLARE_EVENT_CLASS(btrfs__chunk,
 
@@ -447,6 +447,46 @@ struct btrfs_ioctl_send_args {
 	__u64 reserved[4];	/* in */
 };
 
+/* Error codes as returned by the kernel */
+enum btrfs_err_code {
+	notused,
+	BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
+	BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
+	BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
+	BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
+	BTRFS_ERROR_DEV_TGT_REPLACE,
+	BTRFS_ERROR_DEV_MISSING_NOT_FOUND,
+	BTRFS_ERROR_DEV_ONLY_WRITABLE,
+	BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
+};
+/* An error code to error string mapping for the kernel
+ * error codes
+ */
+static inline char *btrfs_err_str(enum btrfs_err_code err_code)
+{
+	switch (err_code) {
+	case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET:
+		return "unable to go below two devices on raid1";
+	case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET:
+		return "unable to go below four devices on raid10";
+	case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET:
+		return "unable to go below two devices on raid5";
+	case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET:
+		return "unable to go below three devices on raid6";
+	case BTRFS_ERROR_DEV_TGT_REPLACE:
+		return "unable to remove the dev_replace target dev";
+	case BTRFS_ERROR_DEV_MISSING_NOT_FOUND:
+		return "no missing devices found to remove";
+	case BTRFS_ERROR_DEV_ONLY_WRITABLE:
+		return "unable to remove the only writeable device";
+	case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS:
+		return "add/delete/balance/replace/resize operation "\
+			"in progress";
+	default:
+		return NULL;
+	}
+}
+
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
 				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
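Note: with enum btrfs_err_code and btrfs_err_str() now in the uapi header, userspace can report these conditions directly. btrfs-progs follows the convention that the device-removal ioctl returns a positive btrfs_err_code for such failures; a sketch along those lines, assuming headers from a kernel that includes this merge:

/* Sketch: turn the kernel's enum btrfs_err_code into a message after
 * a failed device removal. The positive-return convention mirrors
 * btrfs-progs; treat the whole program as illustrative. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	struct btrfs_ioctl_vol_args args;
	int fd, ret;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <mnt> <dev-to-remove>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&args, 0, sizeof(args));
	strncpy(args.name, argv[2], BTRFS_PATH_NAME_MAX);
	ret = ioctl(fd, BTRFS_IOC_RM_DEV, &args);
	if (ret > 0)	/* kernel handed back an enum btrfs_err_code */
		fprintf(stderr, "btrfs: %s\n", btrfs_err_str(ret));
	else if (ret < 0)
		perror("BTRFS_IOC_RM_DEV");
	close(fd);
	return ret ? 1 : 0;
}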
@@ -530,6 +570,7 @@ struct btrfs_ioctl_send_args {
 					     struct btrfs_ioctl_quota_rescan_args)
 #define BTRFS_IOC_QUOTA_RESCAN_STATUS _IOR(BTRFS_IOCTL_MAGIC, 45, \
 					     struct btrfs_ioctl_quota_rescan_args)
+#define BTRFS_IOC_QUOTA_RESCAN_WAIT _IO(BTRFS_IOCTL_MAGIC, 46)
 #define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \
 				   char[BTRFS_LABEL_SIZE])
 #define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \
@@ -538,5 +579,4 @@ struct btrfs_ioctl_send_args {
 				      struct btrfs_ioctl_get_dev_stats)
 #define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \
 				    struct btrfs_ioctl_dev_replace_args)
-
 #endif /* _UAPI_LINUX_BTRFS_H */
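Note: BTRFS_IOC_QUOTA_RESCAN_WAIT is a plain _IO ioctl with no argument structure; it blocks until any quota rescan in flight has finished (the mechanism behind btrfs quota rescan -w). A minimal caller, again assuming headers from a kernel that includes this merge:

/* Sketch: wait for a running quota rescan to complete. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int main(int argc, char **argv)
{
	int fd, ret;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <btrfs-mount-point>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	ret = ioctl(fd, BTRFS_IOC_QUOTA_RESCAN_WAIT);	/* no argument */
	if (ret < 0)
		perror("BTRFS_IOC_QUOTA_RESCAN_WAIT");
	close(fd);
	return ret ? 1 : 0;
}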