Merge branch 'for-linus-4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
 "Outside of our usual batch of fixes, this integrates the subvolume
  quota updates that Qu Wenruo from Fujitsu has been working on for a
  few releases now. He gets an extra gold star for making btrfs smaller
  this time, and fixing a number of quota corners in the process.

  Dave Sterba tested and integrated Anand Jain's sysfs improvements.
  Outside of exporting a symbol (ack'd by Greg) these are all internal
  to btrfs and it's mostly cleanups and fixes. Anand also attached some
  of our sysfs objects to our internal device management structs
  instead of an object off the super block. It will make device
  management easier overall and it's a better fit for how the sysfs
  files are used. None of the existing sysfs files are moved around.

  Thanks for all the fixes everyone"

* 'for-linus-4.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (87 commits)
  btrfs: delayed-ref: double free in btrfs_add_delayed_tree_ref()
  Btrfs: Check if kobject is initialized before put
  lib: export symbol kobject_move()
  Btrfs: sysfs: add support to show replacing target in the sysfs
  Btrfs: free the stale device
  Btrfs: use received_uuid of parent during send
  Btrfs: fix use-after-free in btrfs_replay_log
  btrfs: wait for delayed iputs on no space
  btrfs: qgroup: Make snapshot accounting work with new extent-oriented qgroup.
  btrfs: qgroup: Add the ability to skip given qgroup for old/new_roots.
  btrfs: ulist: Add ulist_del() function.
  btrfs: qgroup: Cleanup the old ref_node-oriented mechanism.
  btrfs: qgroup: Switch self test to extent-oriented qgroup mechanism.
  btrfs: qgroup: Switch to new extent-oriented qgroup mechanism.
  btrfs: qgroup: Switch rescan to new mechanism.
  btrfs: qgroup: Add new qgroup calculation function btrfs_qgroup_account_extents().
  btrfs: backref: Add special time_seq == (u64)-1 case for btrfs_find_all_roots().
  btrfs: qgroup: Add new function to record old_roots.
  btrfs: qgroup: Record possible quota-related extent for qgroup.
  btrfs: qgroup: Add function qgroup_update_counters().
  ...
This commit is contained in:
commit 043cd04950
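The extent-oriented qgroup rework pulled in here replaces per-operation accounting with a once-per-extent comparison at commit time: for each extent touched in the transaction, the set of subvolume roots referencing it before (old_roots) and after (new_roots) is resolved, and per-root counters are adjusted from the difference. Below is a minimal user-space sketch of only that counting rule; every name in it is hypothetical and none of it is kernel API.

/* Standalone model of extent-oriented quota accounting: for each
 * dirty extent, compare the roots that referenced it before and
 * after the transaction and adjust per-root usage once at commit. */
#include <stdio.h>
#include <stdint.h>

#define MAX_ROOTS 4

struct dirty_extent {
    uint64_t bytenr;
    uint64_t num_bytes;
    int old_roots[MAX_ROOTS];   /* 1 if root i referenced it before */
    int new_roots[MAX_ROOTS];   /* 1 if root i references it after */
};

static uint64_t usage[MAX_ROOTS];

/* Account one extent exactly once, no matter how many individual
 * ref add/drop operations touched it during the transaction. */
static void account_extent(const struct dirty_extent *de)
{
    for (int i = 0; i < MAX_ROOTS; i++) {
        if (!de->old_roots[i] && de->new_roots[i])
            usage[i] += de->num_bytes;  /* root gained a reference */
        else if (de->old_roots[i] && !de->new_roots[i])
            usage[i] -= de->num_bytes;  /* root lost its reference */
    }
}

int main(void)
{
    /* extent shared into root 1, dropped from root 0 */
    struct dirty_extent de = {
        .bytenr = 4096, .num_bytes = 16384,
        .old_roots = { 1, 0 }, .new_roots = { 0, 1 },
    };

    usage[0] = 65536;
    account_extent(&de);
    for (int i = 0; i < 2; i++)
        printf("root %d: %llu bytes\n", i, (unsigned long long)usage[i]);
    return 0;
}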
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -85,6 +85,7 @@ BTRFS_WORK_HELPER(extent_refs_helper);
 BTRFS_WORK_HELPER(scrub_helper);
 BTRFS_WORK_HELPER(scrubwrc_helper);
 BTRFS_WORK_HELPER(scrubnc_helper);
+BTRFS_WORK_HELPER(scrubparity_helper);
 
 static struct __btrfs_workqueue *
 __btrfs_alloc_workqueue(const char *name, unsigned int flags, int max_active,
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -64,6 +64,8 @@ BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
 BTRFS_WORK_HELPER_PROTO(scrub_helper);
 BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
 BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
+BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
+
 
 struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
                                               unsigned int flags,
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -250,8 +250,12 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
     * the first item to check. But sometimes, we may enter it with
     * slot==nritems. In that case, go to the next leaf before we continue.
     */
-    if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
-        ret = btrfs_next_old_leaf(root, path, time_seq);
+    if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+        if (time_seq == (u64)-1)
+            ret = btrfs_next_leaf(root, path);
+        else
+            ret = btrfs_next_old_leaf(root, path, time_seq);
+    }
 
     while (!ret && count < total_refs) {
         eb = path->nodes[0];
@@ -291,7 +295,10 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
             eie = NULL;
         }
 next:
-        ret = btrfs_next_old_item(root, path, time_seq);
+        if (time_seq == (u64)-1)
+            ret = btrfs_next_item(root, path);
+        else
+            ret = btrfs_next_old_item(root, path, time_seq);
     }
 
     if (ret > 0)
@@ -334,6 +341,8 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 
     if (path->search_commit_root)
         root_level = btrfs_header_level(root->commit_root);
+    else if (time_seq == (u64)-1)
+        root_level = btrfs_header_level(root->node);
     else
         root_level = btrfs_old_root_level(root, time_seq);
 
@@ -343,7 +352,12 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
     }
 
     path->lowest_level = level;
-    ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
+    if (time_seq == (u64)-1)
+        ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
+                    0, 0);
+    else
+        ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
+                    time_seq);
 
     /* root node has been locked, we can release @subvol_srcu safely here */
     srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -491,7 +505,9 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
         BUG_ON(!ref->wanted_disk_byte);
         eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
                      0);
-        if (!eb || !extent_buffer_uptodate(eb)) {
+        if (IS_ERR(eb)) {
+            return PTR_ERR(eb);
+        } else if (!extent_buffer_uptodate(eb)) {
             free_extent_buffer(eb);
             return -EIO;
         }
@@ -507,7 +523,7 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
 }
 
 /*
- * merge two lists of backrefs and adjust counts accordingly
+ * merge backrefs and adjust counts accordingly
  *
  * mode = 1: merge identical keys, if key is set
  * FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
@@ -535,9 +551,9 @@ static void __merge_refs(struct list_head *head, int mode)
 
             ref2 = list_entry(pos2, struct __prelim_ref, list);
 
-            if (!ref_for_same_block(ref1, ref2))
-                continue;
             if (mode == 1) {
+                if (!ref_for_same_block(ref1, ref2))
+                    continue;
                 if (!ref1->parent && ref2->parent) {
                     xchg = ref1;
                     ref1 = ref2;
@@ -572,8 +588,8 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
                   struct list_head *prefs, u64 *total_refs,
                   u64 inum)
 {
+    struct btrfs_delayed_ref_node *node;
     struct btrfs_delayed_extent_op *extent_op = head->extent_op;
-    struct rb_node *n = &head->node.rb_node;
     struct btrfs_key key;
     struct btrfs_key op_key = {0};
     int sgn;
@@ -583,12 +599,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
         btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
 
     spin_lock(&head->lock);
-    n = rb_first(&head->ref_root);
-    while (n) {
-        struct btrfs_delayed_ref_node *node;
-        node = rb_entry(n, struct btrfs_delayed_ref_node,
-                rb_node);
-        n = rb_next(n);
+    list_for_each_entry(node, &head->ref_list, list) {
         if (node->seq > seq)
             continue;
 
@@ -882,6 +893,11 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
  *
  * NOTE: This can return values > 0
  *
+ * If time_seq is set to (u64)-1, it will not search delayed_refs, and behave
+ * much like trans == NULL case, the difference only lies in it will not
+ * commit root.
+ * The special case is for qgroup to search roots in commit_transaction().
+ *
  * FIXME some caching might speed things up
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
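The new find_parent_nodes() comment above describes a sentinel: time_seq == (u64)-1 means "resolve against the tree as it is now, skipping delayed refs and tree_mod_log replay". A standalone sketch of that dispatch-on-sentinel pattern follows; the two worker functions are hypothetical stand-ins for btrfs_next_leaf()/btrfs_next_old_leaf(), not the kernel code.

/* Standalone model of the (u64)-1 sentinel: one entry point that
 * walks either the current tree or a historical snapshot. */
#include <stdio.h>
#include <stdint.h>

#define SEQ_LAST    ((uint64_t)-1)  /* mirrors time_seq == (u64)-1 */

static int next_leaf_current(void)
{
    printf("walk the tree as it is now\n");
    return 0;
}

static int next_leaf_at_seq(uint64_t seq)
{
    printf("replay modifications up to seq %llu\n",
           (unsigned long long)seq);
    return 0;
}

static int next_leaf(uint64_t time_seq)
{
    if (time_seq == SEQ_LAST)
        return next_leaf_current();
    return next_leaf_at_seq(time_seq);
}

int main(void)
{
    next_leaf(42);       /* historical, time-travel search */
    next_leaf(SEQ_LAST); /* current-tree search, as the qgroup path uses */
    return 0;
}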
@@ -920,6 +936,9 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
         path->skip_locking = 1;
     }
 
+    if (time_seq == (u64)-1)
+        path->skip_locking = 1;
+
     /*
      * grab both a lock on the path and a lock on the delayed ref head.
      * We need both to get a consistent picture of how the refs look
@@ -934,9 +953,10 @@ again:
     BUG_ON(ret == 0);
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-    if (trans && likely(trans->type != __TRANS_DUMMY)) {
+    if (trans && likely(trans->type != __TRANS_DUMMY) &&
+        time_seq != (u64)-1) {
 #else
-    if (trans) {
+    if (trans && time_seq != (u64)-1) {
 #endif
         /*
          * look if there are updates for this ref queued and lock the
@@ -1034,7 +1054,10 @@ again:
 
             eb = read_tree_block(fs_info->extent_root,
                          ref->parent, 0);
-            if (!eb || !extent_buffer_uptodate(eb)) {
+            if (IS_ERR(eb)) {
+                ret = PTR_ERR(eb);
+                goto out;
+            } else if (!extent_buffer_uptodate(eb)) {
                 free_extent_buffer(eb);
                 ret = -EIO;
                 goto out;
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1439,8 +1439,9 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
         btrfs_tree_read_unlock(eb_root);
         free_extent_buffer(eb_root);
         old = read_tree_block(root, logical, 0);
-        if (WARN_ON(!old || !extent_buffer_uptodate(old))) {
-            free_extent_buffer(old);
+        if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
+            if (!IS_ERR(old))
+                free_extent_buffer(old);
             btrfs_warn(root->fs_info,
                 "failed to read tree block %llu from get_old_root", logical);
         } else {
@@ -1685,7 +1686,9 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
         if (!cur || !uptodate) {
             if (!cur) {
                 cur = read_tree_block(root, blocknr, gen);
-                if (!cur || !extent_buffer_uptodate(cur)) {
+                if (IS_ERR(cur)) {
+                    return PTR_ERR(cur);
+                } else if (!extent_buffer_uptodate(cur)) {
                     free_extent_buffer(cur);
                     return -EIO;
                 }
@@ -1864,8 +1867,9 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
 
     eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
                  btrfs_node_ptr_generation(parent, slot));
-    if (eb && !extent_buffer_uptodate(eb)) {
-        free_extent_buffer(eb);
+    if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
+        if (!IS_ERR(eb))
+            free_extent_buffer(eb);
         eb = NULL;
     }
 
@@ -2494,7 +2498,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
 
     ret = -EAGAIN;
     tmp = read_tree_block(root, blocknr, 0);
-    if (tmp) {
+    if (!IS_ERR(tmp)) {
         /*
          * If the read above didn't mark this buffer up to date,
          * it will never end up being up to date. Set ret to EIO now
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -174,7 +174,7 @@ struct btrfs_ordered_sum;
 /* csum types */
 #define BTRFS_CSUM_TYPE_CRC32   0
 
-static int btrfs_csum_sizes[] = { 4, 0 };
+static int btrfs_csum_sizes[] = { 4 };
 
 /* four bytes for CRC32 */
 #define BTRFS_EMPTY_DIR_SIZE 0
@@ -1619,10 +1619,7 @@ struct btrfs_fs_info {
     struct task_struct *cleaner_kthread;
     int thread_pool_size;
 
-    struct kobject super_kobj;
     struct kobject *space_info_kobj;
-    struct kobject *device_dir_kobj;
-    struct completion kobj_unregister;
     int do_barriers;
     int closing;
     int log_root_recovering;
@@ -1698,6 +1695,7 @@ struct btrfs_fs_info {
     struct btrfs_workqueue *scrub_workers;
     struct btrfs_workqueue *scrub_wr_completion_workers;
     struct btrfs_workqueue *scrub_nocow_workers;
+    struct btrfs_workqueue *scrub_parity_workers;
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
     u32 check_integrity_print_mask;
@@ -1735,7 +1733,7 @@ struct btrfs_fs_info {
     /* list of dirty qgroups to be written at next commit */
     struct list_head dirty_qgroups;
 
-    /* used by btrfs_qgroup_record_ref for an efficient tree traversal */
+    /* used by qgroup for an efficient tree traversal */
     u64 qgroup_seq;
 
     /* qgroup rescan items */
@@ -3458,6 +3456,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes, u64 write_bytes)
 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes);
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
                   struct btrfs_root *root);
+void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
                   struct inode *inode);
 void btrfs_orphan_release_metadata(struct inode *inode);
@@ -3515,6 +3514,9 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
 int __get_raid_index(u64 flags);
 int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
 void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
+void check_system_chunk(struct btrfs_trans_handle *trans,
+            struct btrfs_root *root,
+            const u64 type);
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
              int level, int *slot);
@@ -4050,6 +4052,7 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
 
 #ifdef CONFIG_BTRFS_ASSERT
 
+__cold
 static inline void assfail(char *expr, char *file, int line)
 {
     pr_err("BTRFS: assertion failed: %s, file: %s, line: %d",
@@ -4065,10 +4068,12 @@ static inline void assfail(char *expr, char *file, int line)
 
 #define btrfs_assert()
 __printf(5, 6)
+__cold
 void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
              unsigned int line, int errno, const char *fmt, ...);
 
 
+__cold
 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root, const char *function,
                    unsigned int line, int errno);
@@ -4111,11 +4116,17 @@ static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
  * Call btrfs_abort_transaction as early as possible when an error condition is
  * detected, that way the exact line number is reported.
  */
-
 #define btrfs_abort_transaction(trans, root, errno)     \
 do {                                \
-    __btrfs_abort_transaction(trans, root, __func__,    \
-                  __LINE__, errno);     \
+    /* Report first abort since mount */            \
+    if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
+            &((root)->fs_info->fs_state))) {    \
+        WARN(1, KERN_DEBUG              \
+        "BTRFS: Transaction aborted (error %d)\n",  \
+        (errno));                   \
+    }                           \
+    __btrfs_abort_transaction((trans), (root), __func__,    \
+                  __LINE__, (errno));       \
 } while (0)
 
 #define btrfs_std_error(fs_info, errno)             \
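The reworked btrfs_abort_transaction() macro above reports only the first abort since mount: test_and_set_bit() flips a persistent flag, and the WARN fires only for the caller that wins that race. The same idiom in portable C11 atomics, as a standalone sketch (names are illustrative, not kernel API):

/* Standalone model of "warn only on the first abort": an atomic
 * test-and-set guards the noisy report, later aborts stay quiet. */
#include <stdio.h>
#include <stdatomic.h>

static atomic_flag trans_aborted = ATOMIC_FLAG_INIT;

static void abort_transaction(int errno_val)
{
    /* atomic_flag_test_and_set() plays the role of test_and_set_bit() */
    if (!atomic_flag_test_and_set(&trans_aborted))
        fprintf(stderr, "transaction aborted (error %d)\n", errno_val);
    /* ...the actual abort path would still run on every call... */
}

int main(void)
{
    abort_transaction(-5);  /* reported */
    abort_transaction(-28); /* silent: the flag is already set */
    return 0;
}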
@@ -4132,6 +4143,7 @@ do {                               \
 } while (0)
 
 __printf(5, 6)
+__cold
 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
            unsigned int line, int errno, const char *fmt, ...);
 
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -22,6 +22,7 @@
 #include "ctree.h"
 #include "delayed-ref.h"
 #include "transaction.h"
+#include "qgroup.h"
 
 struct kmem_cache *btrfs_delayed_ref_head_cachep;
 struct kmem_cache *btrfs_delayed_tree_ref_cachep;
@@ -84,87 +85,6 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
     return 0;
 }
 
-/*
- * entries in the rb tree are ordered by the byte number of the extent,
- * type of the delayed backrefs and content of delayed backrefs.
- */
-static int comp_entry(struct btrfs_delayed_ref_node *ref2,
-              struct btrfs_delayed_ref_node *ref1,
-              bool compare_seq)
-{
-    if (ref1->bytenr < ref2->bytenr)
-        return -1;
-    if (ref1->bytenr > ref2->bytenr)
-        return 1;
-    if (ref1->is_head && ref2->is_head)
-        return 0;
-    if (ref2->is_head)
-        return -1;
-    if (ref1->is_head)
-        return 1;
-    if (ref1->type < ref2->type)
-        return -1;
-    if (ref1->type > ref2->type)
-        return 1;
-    if (ref1->no_quota > ref2->no_quota)
-        return 1;
-    if (ref1->no_quota < ref2->no_quota)
-        return -1;
-    /* merging of sequenced refs is not allowed */
-    if (compare_seq) {
-        if (ref1->seq < ref2->seq)
-            return -1;
-        if (ref1->seq > ref2->seq)
-            return 1;
-    }
-    if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
-        ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
-        return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
-                      btrfs_delayed_node_to_tree_ref(ref1),
-                      ref1->type);
-    } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
-           ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
-        return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
-                      btrfs_delayed_node_to_data_ref(ref1));
-    }
-    BUG();
-    return 0;
-}
-
-/*
- * insert a new ref into the rbtree. This returns any existing refs
- * for the same (bytenr,parent) tuple, or NULL if the new node was properly
- * inserted.
- */
-static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
-                          struct rb_node *node)
-{
-    struct rb_node **p = &root->rb_node;
-    struct rb_node *parent_node = NULL;
-    struct btrfs_delayed_ref_node *entry;
-    struct btrfs_delayed_ref_node *ins;
-    int cmp;
-
-    ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-    while (*p) {
-        parent_node = *p;
-        entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
-                 rb_node);
-
-        cmp = comp_entry(entry, ins, 1);
-        if (cmp < 0)
-            p = &(*p)->rb_left;
-        else if (cmp > 0)
-            p = &(*p)->rb_right;
-        else
-            return entry;
-    }
-
-    rb_link_node(node, parent_node, p);
-    rb_insert_color(node, root);
-    return NULL;
-}
-
 /* insert a new ref to head ref rbtree */
 static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
                            struct rb_node *node)
@@ -268,7 +188,7 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
         rb_erase(&head->href_node, &delayed_refs->href_root);
     } else {
         assert_spin_locked(&head->lock);
-        rb_erase(&ref->rb_node, &head->ref_root);
+        list_del(&ref->list);
     }
     ref->in_tree = 0;
     btrfs_put_delayed_ref(ref);
@@ -277,99 +197,6 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
         trans->delayed_ref_updates--;
 }
 
-static int merge_ref(struct btrfs_trans_handle *trans,
-             struct btrfs_delayed_ref_root *delayed_refs,
-             struct btrfs_delayed_ref_head *head,
-             struct btrfs_delayed_ref_node *ref, u64 seq)
-{
-    struct rb_node *node;
-    int mod = 0;
-    int done = 0;
-
-    node = rb_next(&ref->rb_node);
-    while (!done && node) {
-        struct btrfs_delayed_ref_node *next;
-
-        next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-        node = rb_next(node);
-        if (seq && next->seq >= seq)
-            break;
-        if (comp_entry(ref, next, 0))
-            continue;
-
-        if (ref->action == next->action) {
-            mod = next->ref_mod;
-        } else {
-            if (ref->ref_mod < next->ref_mod) {
-                struct btrfs_delayed_ref_node *tmp;
-
-                tmp = ref;
-                ref = next;
-                next = tmp;
-                done = 1;
-            }
-            mod = -next->ref_mod;
-        }
-
-        drop_delayed_ref(trans, delayed_refs, head, next);
-        ref->ref_mod += mod;
-        if (ref->ref_mod == 0) {
-            drop_delayed_ref(trans, delayed_refs, head, ref);
-            done = 1;
-        } else {
-            /*
-             * You can't have multiples of the same ref on a tree
-             * block.
-             */
-            WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
-                ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
-        }
-    }
-    return done;
-}
-
-void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
-                  struct btrfs_fs_info *fs_info,
-                  struct btrfs_delayed_ref_root *delayed_refs,
-                  struct btrfs_delayed_ref_head *head)
-{
-    struct rb_node *node;
-    u64 seq = 0;
-
-    assert_spin_locked(&head->lock);
-    /*
-     * We don't have too much refs to merge in the case of delayed data
-     * refs.
-     */
-    if (head->is_data)
-        return;
-
-    spin_lock(&fs_info->tree_mod_seq_lock);
-    if (!list_empty(&fs_info->tree_mod_seq_list)) {
-        struct seq_list *elem;
-
-        elem = list_first_entry(&fs_info->tree_mod_seq_list,
-                    struct seq_list, list);
-        seq = elem->seq;
-    }
-    spin_unlock(&fs_info->tree_mod_seq_lock);
-
-    node = rb_first(&head->ref_root);
-    while (node) {
-        struct btrfs_delayed_ref_node *ref;
-
-        ref = rb_entry(node, struct btrfs_delayed_ref_node,
-                   rb_node);
-        /* We can't merge refs that are outside of our seq count */
-        if (seq && ref->seq >= seq)
-            break;
-        if (merge_ref(trans, delayed_refs, head, ref, seq))
-            node = rb_first(&head->ref_root);
-        else
-            node = rb_next(&ref->rb_node);
-    }
-}
-
 int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
                 struct btrfs_delayed_ref_root *delayed_refs,
                 u64 seq)
@@ -443,45 +270,71 @@ again:
 }
 
 /*
- * helper function to update an extent delayed ref in the
- * rbtree. existing and update must both have the same
- * bytenr and parent
+ * Helper to insert the ref_node to the tail or merge with tail.
  *
- * This may free existing if the update cancels out whatever
- * operation it was doing.
+ * Return 0 for insert.
+ * Return >0 for merge.
  */
-static noinline void
-update_existing_ref(struct btrfs_trans_handle *trans,
-            struct btrfs_delayed_ref_root *delayed_refs,
-            struct btrfs_delayed_ref_head *head,
-            struct btrfs_delayed_ref_node *existing,
-            struct btrfs_delayed_ref_node *update)
+static int
+add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
+               struct btrfs_delayed_ref_root *root,
+               struct btrfs_delayed_ref_head *href,
+               struct btrfs_delayed_ref_node *ref)
 {
-    if (update->action != existing->action) {
-        /*
-         * this is effectively undoing either an add or a
-         * drop. We decrement the ref_mod, and if it goes
-         * down to zero we just delete the entry without
-         * every changing the extent allocation tree.
-         */
-        existing->ref_mod--;
-        if (existing->ref_mod == 0)
-            drop_delayed_ref(trans, delayed_refs, head, existing);
-        else
-            WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
-                existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
+    struct btrfs_delayed_ref_node *exist;
+    int mod;
+    int ret = 0;
+
+    spin_lock(&href->lock);
+    /* Check whether we can merge the tail node with ref */
+    if (list_empty(&href->ref_list))
+        goto add_tail;
+    exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
+               list);
+    /* No need to compare bytenr nor is_head */
+    if (exist->type != ref->type || exist->no_quota != ref->no_quota ||
+        exist->seq != ref->seq)
+        goto add_tail;
+
+    if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
+         exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
+        comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
+               btrfs_delayed_node_to_tree_ref(ref),
+               ref->type))
+        goto add_tail;
+    if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
+         exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
+        comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
+               btrfs_delayed_node_to_data_ref(ref)))
+        goto add_tail;
+
+    /* Now we are sure we can merge */
+    ret = 1;
+    if (exist->action == ref->action) {
+        mod = ref->ref_mod;
     } else {
-        WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
-            existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
-        /*
-         * the action on the existing ref matches
-         * the action on the ref we're trying to add.
-         * Bump the ref_mod by one so the backref that
-         * is eventually added/removed has the correct
-         * reference count
-         */
-        existing->ref_mod += update->ref_mod;
+        /* Need to change action */
+        if (exist->ref_mod < ref->ref_mod) {
+            exist->action = ref->action;
+            mod = -exist->ref_mod;
+            exist->ref_mod = ref->ref_mod;
+        } else
+            mod = -ref->ref_mod;
     }
+    exist->ref_mod += mod;
+
+    /* remove existing tail if its ref_mod is zero */
+    if (exist->ref_mod == 0)
+        drop_delayed_ref(trans, root, href, exist);
+    spin_unlock(&href->lock);
+    return ret;
+
+add_tail:
+    list_add_tail(&ref->list, &href->ref_list);
+    atomic_inc(&root->num_entries);
+    trans->delayed_ref_updates++;
+    spin_unlock(&href->lock);
+    return ret;
 }
 
 /*
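add_delayed_ref_tail_merge() above only ever compares the incoming ref against the current list tail: either the tail absorbs it, with the ref_mod values folded together and the ref dropped entirely when they cancel to zero, or the new ref is appended. A simplified standalone model of that decision follows; the types are hypothetical, and unlike the kernel version this sketch folds opposite actions with simple signed arithmetic instead of also flipping the surviving action when the new ref's ref_mod dominates.

/* Standalone model of "merge with the tail or append": a new ref is
 * only compared against the most recently queued one. */
#include <stdio.h>

enum action { REF_ADD, REF_DROP };

struct ref {
    int type;
    enum action action;
    int ref_mod;
};

static struct ref queue[16];
static int tail = -1;   /* index of last queued ref, -1 when empty */

static int queue_ref(struct ref r)
{
    struct ref *exist = tail >= 0 ? &queue[tail] : NULL;

    if (!exist || exist->type != r.type) {
        queue[++tail] = r;      /* nothing mergeable: append */
        return 0;
    }
    if (exist->action == r.action)
        exist->ref_mod += r.ref_mod;    /* same action: fold in */
    else
        exist->ref_mod -= r.ref_mod;    /* opposite action cancels */
    if (exist->ref_mod == 0)
        tail--;                 /* fully cancelled: drop the tail */
    return 1;                   /* >0 means "merged", as in the patch */
}

int main(void)
{
    queue_ref((struct ref){ .type = 1, .action = REF_ADD, .ref_mod = 1 });
    queue_ref((struct ref){ .type = 1, .action = REF_DROP, .ref_mod = 1 });
    printf("queued refs: %d\n", tail + 1);  /* 0: add and drop cancelled */
    return 0;
}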
@@ -568,12 +421,14 @@ update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
 static noinline struct btrfs_delayed_ref_head *
 add_delayed_ref_head(struct btrfs_fs_info *fs_info,
              struct btrfs_trans_handle *trans,
-             struct btrfs_delayed_ref_node *ref, u64 bytenr,
-             u64 num_bytes, int action, int is_data)
+             struct btrfs_delayed_ref_node *ref,
+             struct btrfs_qgroup_extent_record *qrecord,
+             u64 bytenr, u64 num_bytes, int action, int is_data)
 {
     struct btrfs_delayed_ref_head *existing;
     struct btrfs_delayed_ref_head *head_ref = NULL;
     struct btrfs_delayed_ref_root *delayed_refs;
+    struct btrfs_qgroup_extent_record *qexisting;
     int count_mod = 1;
     int must_insert_reserved = 0;
 
@@ -618,10 +473,22 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
     head_ref = btrfs_delayed_node_to_head(ref);
     head_ref->must_insert_reserved = must_insert_reserved;
     head_ref->is_data = is_data;
-    head_ref->ref_root = RB_ROOT;
+    INIT_LIST_HEAD(&head_ref->ref_list);
     head_ref->processing = 0;
     head_ref->total_ref_mod = count_mod;
 
+    /* Record qgroup extent info if provided */
+    if (qrecord) {
+        qrecord->bytenr = bytenr;
+        qrecord->num_bytes = num_bytes;
+        qrecord->old_roots = NULL;
+
+        qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
+                                 qrecord);
+        if (qexisting)
+            kfree(qrecord);
+    }
+
     spin_lock_init(&head_ref->lock);
     mutex_init(&head_ref->mutex);
 
@@ -659,10 +526,10 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
              u64 num_bytes, u64 parent, u64 ref_root, int level,
              int action, int no_quota)
 {
-    struct btrfs_delayed_ref_node *existing;
     struct btrfs_delayed_tree_ref *full_ref;
     struct btrfs_delayed_ref_root *delayed_refs;
     u64 seq = 0;
+    int ret;
 
     if (action == BTRFS_ADD_DELAYED_EXTENT)
         action = BTRFS_ADD_DELAYED_REF;
@@ -693,21 +560,14 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 
     trace_add_delayed_tree_ref(ref, full_ref, action);
 
-    spin_lock(&head_ref->lock);
-    existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
-    if (existing) {
-        update_existing_ref(trans, delayed_refs, head_ref, existing,
-                    ref);
-        /*
-         * we've updated the existing ref, free the newly
-         * allocated ref
-         */
+    ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+
+    /*
+     * XXX: memory should be freed at the same level allocated.
+     * But bad practice is anywhere... Follow it now. Need cleanup.
+     */
+    if (ret > 0)
         kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
-    } else {
-        atomic_inc(&delayed_refs->num_entries);
-        trans->delayed_ref_updates++;
-    }
-    spin_unlock(&head_ref->lock);
 }
 
 /*
@@ -721,10 +581,10 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
              u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
              u64 offset, int action, int no_quota)
 {
-    struct btrfs_delayed_ref_node *existing;
     struct btrfs_delayed_data_ref *full_ref;
     struct btrfs_delayed_ref_root *delayed_refs;
     u64 seq = 0;
+    int ret;
 
     if (action == BTRFS_ADD_DELAYED_EXTENT)
         action = BTRFS_ADD_DELAYED_REF;
@@ -758,21 +618,10 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 
     trace_add_delayed_data_ref(ref, full_ref, action);
 
-    spin_lock(&head_ref->lock);
-    existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
-    if (existing) {
-        update_existing_ref(trans, delayed_refs, head_ref, existing,
-                    ref);
-        /*
-         * we've updated the existing ref, free the newly
-         * allocated ref
-         */
+    ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
+
+    if (ret > 0)
         kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
-    } else {
-        atomic_inc(&delayed_refs->num_entries);
-        trans->delayed_ref_updates++;
-    }
-    spin_unlock(&head_ref->lock);
 }
 
 /*
@@ -790,6 +639,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
     struct btrfs_delayed_tree_ref *ref;
     struct btrfs_delayed_ref_head *head_ref;
     struct btrfs_delayed_ref_root *delayed_refs;
+    struct btrfs_qgroup_extent_record *record = NULL;
 
     if (!is_fstree(ref_root) || !fs_info->quota_enabled)
         no_quota = 0;
@@ -800,9 +650,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
         return -ENOMEM;
 
     head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
-    if (!head_ref) {
-        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
-        return -ENOMEM;
+    if (!head_ref)
+        goto free_ref;
+
+    if (fs_info->quota_enabled && is_fstree(ref_root)) {
+        record = kmalloc(sizeof(*record), GFP_NOFS);
+        if (!record)
+            goto free_head_ref;
     }
 
     head_ref->extent_op = extent_op;
@@ -814,7 +668,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
      * insert both the head node and the new ref without dropping
      * the spin lock
      */
-    head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+    head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                     bytenr, num_bytes, action, 0);
 
     add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -823,6 +677,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
     spin_unlock(&delayed_refs->lock);
 
     return 0;
+
+free_head_ref:
+    kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
+free_ref:
+    kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+
+    return -ENOMEM;
 }
 
 /*
@@ -839,6 +700,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
     struct btrfs_delayed_data_ref *ref;
     struct btrfs_delayed_ref_head *head_ref;
     struct btrfs_delayed_ref_root *delayed_refs;
+    struct btrfs_qgroup_extent_record *record = NULL;
 
     if (!is_fstree(ref_root) || !fs_info->quota_enabled)
         no_quota = 0;
@@ -854,6 +716,16 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
         return -ENOMEM;
     }
 
+    if (fs_info->quota_enabled && is_fstree(ref_root)) {
+        record = kmalloc(sizeof(*record), GFP_NOFS);
+        if (!record) {
+            kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+            kmem_cache_free(btrfs_delayed_ref_head_cachep,
+                    head_ref);
+            return -ENOMEM;
+        }
+    }
+
     head_ref->extent_op = extent_op;
 
     delayed_refs = &trans->transaction->delayed_refs;
@@ -863,7 +735,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
      * insert both the head node and the new ref without dropping
      * the spin lock
      */
-    head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
+    head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
                     bytenr, num_bytes, action, 1);
 
     add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
@@ -891,9 +763,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
     delayed_refs = &trans->transaction->delayed_refs;
     spin_lock(&delayed_refs->lock);
 
-    add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
-                 num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
-                 extent_op->is_data);
+    add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
+                 num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
+                 extent_op->is_data);
 
     spin_unlock(&delayed_refs->lock);
     return 0;
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -24,9 +24,25 @@
 #define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */
 #define BTRFS_UPDATE_DELAYED_HEAD 4 /* not changing ref count on head ref */
 
+/*
+ * XXX: Qu: I really hate the design that ref_head and tree/data ref shares the
+ * same ref_node structure.
+ * Ref_head is in a higher logic level than tree/data ref, and duplicated
+ * bytenr/num_bytes in ref_node is really a waste or memory, they should be
+ * referred from ref_head.
+ * This gets more disgusting after we use list to store tree/data ref in
+ * ref_head. Must clean this mess up later.
+ */
 struct btrfs_delayed_ref_node {
+    /*
+     * ref_head use rb tree, stored in ref_root->href.
+     * indexed by bytenr
+     */
     struct rb_node rb_node;
 
+    /*data/tree ref use list, stored in ref_head->ref_list. */
+    struct list_head list;
+
     /* the starting bytenr of the extent */
     u64 bytenr;
 
@@ -83,7 +99,7 @@ struct btrfs_delayed_ref_head {
     struct mutex mutex;
 
     spinlock_t lock;
-    struct rb_root ref_root;
+    struct list_head ref_list;
 
     struct rb_node href_node;
 
@@ -132,6 +148,9 @@ struct btrfs_delayed_ref_root {
     /* head ref rbtree */
     struct rb_root href_root;
 
+    /* dirty extent records */
+    struct rb_root dirty_extent_root;
+
     /* this spin lock protects the rbtree and the entries inside */
     spinlock_t lock;
 
@@ -156,6 +175,14 @@ struct btrfs_delayed_ref_root {
     int flushing;
 
     u64 run_delayed_start;
+
+    /*
+     * To make qgroup to skip given root.
+     * This is for snapshot, as btrfs_qgroup_inherit() will manully
+     * modify counters for snapshot and its source, so we should skip
+     * the snapshot in new_root/old_roots or it will get calculated twice
+     */
+    u64 qgroup_to_skip;
 };
 
 extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
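The header above captures the new two-level layout: ref heads stay keyed by bytenr in an rbtree, while the per-head tree/data refs move from an rbtree to a plain list. A standalone sketch of that shape, using a single head and a singly linked list for brevity (all names hypothetical):

/* Standalone model of the new layout: heads are keyed by bytenr
 * (an rbtree in the kernel), and each head carries a simple list
 * of the tree/data refs queued against that extent. */
#include <stdio.h>
#include <stdlib.h>

struct ref_node {
    int seq;
    struct ref_node *next;  /* stands in for the list_head member */
};

struct ref_head {
    unsigned long long bytenr;  /* the rbtree key in the kernel */
    struct ref_node *refs;      /* stands in for ref_list */
};

static void add_ref(struct ref_head *head, int seq)
{
    struct ref_node *ref = malloc(sizeof(*ref));

    ref->seq = seq;
    ref->next = head->refs;
    head->refs = ref;
}

int main(void)
{
    struct ref_head head = { .bytenr = 136708096, .refs = NULL };

    add_ref(&head, 1);
    add_ref(&head, 2);
    while (head.refs) {
        struct ref_node *r = head.refs;

        printf("extent %llu: ref seq %d\n", head.bytenr, r->seq);
        head.refs = r->next;
        free(r);
    }
    return 0;
}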
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -376,6 +376,10 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
     WARN_ON(!tgt_device);
     dev_replace->tgtdev = tgt_device;
 
+    ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
+    if (ret)
+        btrfs_error(root->fs_info, ret, "kobj add dev failed");
+
     printk_in_rcu(KERN_INFO
               "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
               src_device->missing ? "<missing disk>" :
@@ -583,8 +587,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
     mutex_unlock(&uuid_mutex);
 
     /* replace the sysfs entry */
-    btrfs_kobj_rm_device(fs_info, src_device);
-    btrfs_kobj_add_device(fs_info, tgt_device);
+    btrfs_kobj_rm_device(fs_info->fs_devices, src_device);
     btrfs_rm_dev_replace_free_srcdev(fs_info, src_device);
 
     /* write back the superblocks */
|
|||
|
||||
buf = btrfs_find_create_tree_block(root, bytenr);
|
||||
if (!buf)
|
||||
return NULL;
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
|
||||
if (ret) {
|
||||
free_extent_buffer(buf);
|
||||
return NULL;
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
return buf;
|
||||
|
||||
|
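read_tree_block() now returns ERR_PTR(-ENOMEM)/ERR_PTR(ret) instead of NULL, which is why callers throughout this pull switch from NULL checks to IS_ERR()/PTR_ERR(). A standalone user-space model of the convention; the kernel's real helpers live in include/linux/err.h, so this sketch only mirrors their shape:

/* Standalone model of the ERR_PTR convention: the error code travels
 * inside the pointer value itself instead of being collapsed to NULL. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO   4095

static inline void *ERR_PTR(long error)
{
    return (void *)(intptr_t)error;
}

static inline long PTR_ERR(const void *ptr)
{
    return (long)(intptr_t)ptr;
}

static inline int IS_ERR(const void *ptr)
{
    /* the top page of the address space is reserved for errors */
    return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static int buffer;

static void *read_block(int fail)   /* hypothetical stand-in */
{
    if (fail)
        return ERR_PTR(-EIO);   /* caller learns *why* the read failed */
    return &buffer;
}

int main(void)
{
    void *eb = read_block(1);

    if (IS_ERR(eb))
        printf("read failed: %ld\n", PTR_ERR(eb));  /* prints -5 */
    else
        printf("read ok\n");
    return 0;
}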
@@ -1509,20 +1509,19 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
     generation = btrfs_root_generation(&root->root_item);
     root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                      generation);
-    if (!root->node) {
-        ret = -ENOMEM;
+    if (IS_ERR(root->node)) {
+        ret = PTR_ERR(root->node);
         goto find_fail;
     } else if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
         ret = -EIO;
-        goto read_fail;
+        free_extent_buffer(root->node);
+        goto find_fail;
     }
     root->commit_root = btrfs_root_node(root);
 out:
     btrfs_free_path(path);
     return root;
 
-read_fail:
-    free_extent_buffer(root->node);
 find_fail:
     kfree(root);
 alloc_fail:
@@ -2320,8 +2319,12 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 
     log_tree_root->node = read_tree_block(tree_root, bytenr,
             fs_info->generation + 1);
-    if (!log_tree_root->node ||
-        !extent_buffer_uptodate(log_tree_root->node)) {
+    if (IS_ERR(log_tree_root->node)) {
         printk(KERN_ERR "BTRFS: failed to read log tree\n");
+        ret = PTR_ERR(log_tree_root->node);
+        kfree(log_tree_root);
+        return ret;
+    } else if (!extent_buffer_uptodate(log_tree_root->node)) {
+        printk(KERN_ERR "BTRFS: failed to read log tree\n");
         free_extent_buffer(log_tree_root->node);
         kfree(log_tree_root);
@@ -2494,7 +2497,6 @@ int open_ctree(struct super_block *sb,
     seqlock_init(&fs_info->profiles_lock);
     init_rwsem(&fs_info->delayed_iput_sem);
 
-    init_completion(&fs_info->kobj_unregister);
     INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
     INIT_LIST_HEAD(&fs_info->space_info);
     INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
@@ -2797,8 +2799,8 @@ int open_ctree(struct super_block *sb,
     chunk_root->node = read_tree_block(chunk_root,
                        btrfs_super_chunk_root(disk_super),
                        generation);
-    if (!chunk_root->node ||
-        !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
+    if (IS_ERR(chunk_root->node) ||
+        !extent_buffer_uptodate(chunk_root->node)) {
         printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
                sb->s_id);
         goto fail_tree_roots;
@@ -2834,8 +2836,8 @@ retry_root_backup:
     tree_root->node = read_tree_block(tree_root,
                       btrfs_super_root(disk_super),
                       generation);
-    if (!tree_root->node ||
-        !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
+    if (IS_ERR(tree_root->node) ||
+        !extent_buffer_uptodate(tree_root->node)) {
         printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
                sb->s_id);
 
@@ -2874,10 +2876,22 @@ retry_root_backup:
 
     btrfs_close_extra_devices(fs_devices, 1);
 
+    ret = btrfs_sysfs_add_fsid(fs_devices, NULL);
+    if (ret) {
+        pr_err("BTRFS: failed to init sysfs fsid interface: %d\n", ret);
+        goto fail_block_groups;
+    }
+
+    ret = btrfs_sysfs_add_device(fs_devices);
+    if (ret) {
+        pr_err("BTRFS: failed to init sysfs device interface: %d\n", ret);
+        goto fail_fsdev_sysfs;
+    }
+
     ret = btrfs_sysfs_add_one(fs_info);
     if (ret) {
         pr_err("BTRFS: failed to init sysfs interface: %d\n", ret);
-        goto fail_block_groups;
+        goto fail_fsdev_sysfs;
     }
 
     ret = btrfs_init_space_info(fs_info);
@@ -3055,6 +3069,9 @@ fail_cleaner:
 fail_sysfs:
     btrfs_sysfs_remove_one(fs_info);
 
+fail_fsdev_sysfs:
+    btrfs_sysfs_remove_fsid(fs_info->fs_devices);
+
 fail_block_groups:
     btrfs_put_block_group_cache(fs_info);
     btrfs_free_block_groups(fs_info);
@@ -3725,6 +3742,7 @@ void close_ctree(struct btrfs_root *root)
     }
 
     btrfs_sysfs_remove_one(fs_info);
+    btrfs_sysfs_remove_fsid(fs_info->fs_devices);
 
     btrfs_free_fs_roots(fs_info);
 
@@ -4053,6 +4071,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
 
     while ((node = rb_first(&delayed_refs->href_root)) != NULL) {
         struct btrfs_delayed_ref_head *head;
+        struct btrfs_delayed_ref_node *tmp;
         bool pin_bytes = false;
 
         head = rb_entry(node, struct btrfs_delayed_ref_head,
@@ -4068,11 +4087,10 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
             continue;
         }
         spin_lock(&head->lock);
-        while ((node = rb_first(&head->ref_root)) != NULL) {
-            ref = rb_entry(node, struct btrfs_delayed_ref_node,
-                       rb_node);
+        list_for_each_entry_safe_reverse(ref, tmp, &head->ref_list,
+                         list) {
             ref->in_tree = 0;
-            rb_erase(&ref->rb_node, &head->ref_root);
+            list_del(&ref->list);
             atomic_dec(&delayed_refs->num_entries);
             btrfs_put_delayed_ref(ref);
         }
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -79,11 +79,10 @@ static int update_block_group(struct btrfs_trans_handle *trans,
                   u64 num_bytes, int alloc);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root,
-                   u64 bytenr, u64 num_bytes, u64 parent,
+                   struct btrfs_delayed_ref_node *node, u64 parent,
                    u64 root_objectid, u64 owner_objectid,
                    u64 owner_offset, int refs_to_drop,
-                   struct btrfs_delayed_extent_op *extra_op,
-                   int no_quota);
+                   struct btrfs_delayed_extent_op *extra_op);
 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                     struct extent_buffer *leaf,
                     struct btrfs_extent_item *ei);
@@ -1967,10 +1966,9 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
 
 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                   struct btrfs_root *root,
-                  u64 bytenr, u64 num_bytes,
+                  struct btrfs_delayed_ref_node *node,
                   u64 parent, u64 root_objectid,
                   u64 owner, u64 offset, int refs_to_add,
-                  int no_quota,
                   struct btrfs_delayed_extent_op *extent_op)
 {
     struct btrfs_fs_info *fs_info = root->fs_info;
@@ -1978,9 +1976,11 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
     struct extent_buffer *leaf;
     struct btrfs_extent_item *item;
     struct btrfs_key key;
+    u64 bytenr = node->bytenr;
+    u64 num_bytes = node->num_bytes;
     u64 refs;
     int ret;
-    enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_ADD_EXCL;
+    int no_quota = node->no_quota;
 
     path = btrfs_alloc_path();
     if (!path)
@@ -1996,26 +1996,8 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                        bytenr, num_bytes, parent,
                        root_objectid, owner, offset,
                        refs_to_add, extent_op);
-    if ((ret < 0 && ret != -EAGAIN) || (!ret && no_quota))
+    if ((ret < 0 && ret != -EAGAIN) || !ret)
         goto out;
-    /*
-     * Ok we were able to insert an inline extent and it appears to be a new
-     * reference, deal with the qgroup accounting.
-     */
-    if (!ret && !no_quota) {
-        ASSERT(root->fs_info->quota_enabled);
-        leaf = path->nodes[0];
-        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-        item = btrfs_item_ptr(leaf, path->slots[0],
-                      struct btrfs_extent_item);
-        if (btrfs_extent_refs(leaf, item) > (u64)refs_to_add)
-            type = BTRFS_QGROUP_OPER_ADD_SHARED;
-        btrfs_release_path(path);
-
-        ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
-                          bytenr, num_bytes, type, 0);
-        goto out;
-    }
 
     /*
      * Ok we had -EAGAIN which means we didn't have space to insert and
@@ -2026,8 +2008,6 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
     btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
     item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
     refs = btrfs_extent_refs(leaf, item);
-    if (refs)
-        type = BTRFS_QGROUP_OPER_ADD_SHARED;
     btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
     if (extent_op)
         __run_delayed_extent_op(extent_op, leaf, item);
@@ -2035,13 +2015,6 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
     btrfs_mark_buffer_dirty(leaf);
     btrfs_release_path(path);
 
-    if (!no_quota) {
-        ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
-                          bytenr, num_bytes, type, 0);
-        if (ret)
-            goto out;
-    }
-
     path->reada = 1;
     path->leave_spinning = 1;
     /* now insert the actual backref */
@@ -2087,17 +2060,15 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
                           ref->objectid, ref->offset,
                           &ins, node->ref_mod);
     } else if (node->action == BTRFS_ADD_DELAYED_REF) {
-        ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
-                         node->num_bytes, parent,
+        ret = __btrfs_inc_extent_ref(trans, root, node, parent,
                          ref_root, ref->objectid,
                          ref->offset, node->ref_mod,
-                         node->no_quota, extent_op);
+                         extent_op);
     } else if (node->action == BTRFS_DROP_DELAYED_REF) {
-        ret = __btrfs_free_extent(trans, root, node->bytenr,
-                      node->num_bytes, parent,
+        ret = __btrfs_free_extent(trans, root, node, parent,
                       ref_root, ref->objectid,
                       ref->offset, node->ref_mod,
-                      extent_op, node->no_quota);
+                      extent_op);
     } else {
         BUG();
     }
@@ -2255,15 +2226,14 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
                         ref->level, &ins,
                         node->no_quota);
     } else if (node->action == BTRFS_ADD_DELAYED_REF) {
-        ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
-                         node->num_bytes, parent, ref_root,
-                         ref->level, 0, 1, node->no_quota,
+        ret = __btrfs_inc_extent_ref(trans, root, node,
+                         parent, ref_root,
+                         ref->level, 0, 1,
                          extent_op);
     } else if (node->action == BTRFS_DROP_DELAYED_REF) {
-        ret = __btrfs_free_extent(trans, root, node->bytenr,
-                      node->num_bytes, parent, ref_root,
-                      ref->level, 0, 1, extent_op,
-                      node->no_quota);
+        ret = __btrfs_free_extent(trans, root, node,
+                      parent, ref_root,
+                      ref->level, 0, 1, extent_op);
     } else {
         BUG();
     }
@@ -2323,28 +2293,14 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
     return ret;
 }
 
-static noinline struct btrfs_delayed_ref_node *
+static inline struct btrfs_delayed_ref_node *
 select_delayed_ref(struct btrfs_delayed_ref_head *head)
 {
-    struct rb_node *node;
-    struct btrfs_delayed_ref_node *ref, *last = NULL;;
+    if (list_empty(&head->ref_list))
+        return NULL;
 
-    /*
-     * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
-     * this prevents ref count from going down to zero when
-     * there still are pending delayed ref.
-     */
-    node = rb_first(&head->ref_root);
-    while (node) {
-        ref = rb_entry(node, struct btrfs_delayed_ref_node,
-                rb_node);
-        if (ref->action == BTRFS_ADD_DELAYED_REF)
-            return ref;
-        else if (last == NULL)
-            last = ref;
-        node = rb_next(node);
-    }
-    return last;
+    return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
+              list);
 }
 
 /*
@@ -2396,16 +2352,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
             }
         }
 
-        /*
-         * We need to try and merge add/drops of the same ref since we
-         * can run into issues with relocate dropping the implicit ref
-         * and then it being added back again before the drop can
-         * finish. If we merged anything we need to re-loop so we can
-         * get a good ref.
-         */
         spin_lock(&locked_ref->lock);
-        btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
-                     locked_ref);
 
         /*
          * locked_ref is the head node, so we have to go one
@@ -2482,7 +2429,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
             spin_unlock(&locked_ref->lock);
             spin_lock(&delayed_refs->lock);
             spin_lock(&locked_ref->lock);
-            if (rb_first(&locked_ref->ref_root) ||
+            if (!list_empty(&locked_ref->ref_list) ||
                 locked_ref->extent_op) {
                 spin_unlock(&locked_ref->lock);
                 spin_unlock(&delayed_refs->lock);
@@ -2496,7 +2443,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
         } else {
             actual_count++;
             ref->in_tree = 0;
-            rb_erase(&ref->rb_node, &locked_ref->ref_root);
+            list_del(&ref->list);
         }
         atomic_dec(&delayed_refs->num_entries);
 
@@ -2864,9 +2811,6 @@ again:
         goto again;
     }
 out:
-    ret = btrfs_delayed_qgroup_accounting(trans, root->fs_info);
-    if (ret)
-        return ret;
     assert_qgroups_uptodate(trans);
     return 0;
 }
@@ -2905,7 +2849,6 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
     struct btrfs_delayed_ref_node *ref;
     struct btrfs_delayed_data_ref *data_ref;
     struct btrfs_delayed_ref_root *delayed_refs;
-    struct rb_node *node;
     int ret = 0;
 
     delayed_refs = &trans->transaction->delayed_refs;
@@ -2934,11 +2877,7 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
     spin_unlock(&delayed_refs->lock);
 
     spin_lock(&head->lock);
-    node = rb_first(&head->ref_root);
-    while (node) {
-        ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
-        node = rb_next(node);
-
+    list_for_each_entry(ref, &head->ref_list, list) {
         /* If it's a shared ref we know a cross reference exists */
         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
             ret = 1;
@@ -3693,7 +3632,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
         found->disk_total += total_bytes * factor;
         found->bytes_used += bytes_used;
         found->disk_used += bytes_used * factor;
-        found->full = 0;
+        if (total_bytes > 0)
+            found->full = 0;
         spin_unlock(&found->lock);
         *space_info = found;
         return 0;
@@ -3721,7 +3661,10 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
     found->bytes_reserved = 0;
     found->bytes_readonly = 0;
     found->bytes_may_use = 0;
-    found->full = 0;
+    if (total_bytes > 0)
+        found->full = 0;
+    else
+        found->full = 1;
     found->force_alloc = CHUNK_ALLOC_NO_FORCE;
     found->chunk_alloc = 0;
     found->flush = 0;
@@ -3975,6 +3918,9 @@ commit_trans:
             !atomic_read(&root->fs_info->open_ioctl_trans)) {
             need_commit--;
 
+            if (need_commit > 0)
+                btrfs_wait_ordered_roots(fs_info, -1);
+
             trans = btrfs_join_transaction(root);
             if (IS_ERR(trans))
                 return PTR_ERR(trans);
@@ -4088,7 +4034,7 @@ static int should_alloc_chunk(struct btrfs_root *root,
     return 1;
 }
 
-static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
+static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
 {
     u64 num_dev;
 
@@ -4102,24 +4048,43 @@ static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
     else
         num_dev = 1;    /* DUP or single */
 
-    /* metadata for updaing devices and chunk tree */
-    return btrfs_calc_trans_metadata_size(root, num_dev + 1);
+    return num_dev;
 }
 
-static void check_system_chunk(struct btrfs_trans_handle *trans,
-                   struct btrfs_root *root, u64 type)
+/*
+ * If @is_allocation is true, reserve space in the system space info necessary
+ * for allocating a chunk, otherwise if it's false, reserve space necessary for
+ * removing a chunk.
+ */
+void check_system_chunk(struct btrfs_trans_handle *trans,
+            struct btrfs_root *root,
+            u64 type)
 {
     struct btrfs_space_info *info;
     u64 left;
     u64 thresh;
+    int ret = 0;
+    u64 num_devs;
+
+    /*
+     * Needed because we can end up allocating a system chunk and for an
+     * atomic and race free space reservation in the chunk block reserve.
+     */
+    ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
 
     info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
     spin_lock(&info->lock);
     left = info->total_bytes - info->bytes_used - info->bytes_pinned -
-        info->bytes_reserved - info->bytes_readonly;
+        info->bytes_reserved - info->bytes_readonly -
+        info->bytes_may_use;
     spin_unlock(&info->lock);
 
-    thresh = get_system_chunk_thresh(root, type);
+    num_devs = get_profile_num_devs(root, type);
+
+    /* num_devs device items to update and 1 chunk item to add or remove */
+    thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
+        btrfs_calc_trans_metadata_size(root, 1);
+
     if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
         btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
                left, thresh, type);
@@ -4130,7 +4095,21 @@ static void check_system_chunk(struct btrfs_trans_handle *trans,
         u64 flags;
 
         flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
-        btrfs_alloc_chunk(trans, root, flags);
+        /*
+         * Ignore failure to create system chunk. We might end up not
+         * needing it, as we might not need to COW all nodes/leafs from
+         * the paths we visit in the chunk tree (they were already COWed
+         * or created in the current transaction for example).
+         */
+        ret = btrfs_alloc_chunk(trans, root, flags);
+    }
+
+    if (!ret) {
+        ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
+                      &root->fs_info->chunk_block_rsv,
+                      thresh, BTRFS_RESERVE_NO_FLUSH);
+        if (!ret)
+            trans->chunk_bytes_reserved += thresh;
     }
 }
 
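check_system_chunk() now reserves, in the chunk block reserve, the worst-case metadata needed to update num_devs device items plus one chunk item before the chunk tree is modified. A standalone sketch of just that reservation arithmetic follows; the constants and the per-item bound are illustrative, not the kernel's actual btrfs_calc_trunc_metadata_size()/btrfs_calc_trans_metadata_size() formulas.

/* Standalone model of the reservation math: bound the metadata cost
 * of updating num_devs device items plus one chunk item. */
#include <stdio.h>
#include <stdint.h>

#define NODESIZE    16384ULL
#define TREE_LEVELS 8ULL    /* BTRFS_MAX_LEVEL in the kernel */

/* one COWed path through a metadata tree per item, as a rough bound */
static uint64_t metadata_size(uint64_t nr_items)
{
    return NODESIZE * TREE_LEVELS * nr_items;
}

int main(void)
{
    uint64_t num_devs = 2;  /* e.g. a two-device RAID1 profile */
    uint64_t thresh = metadata_size(num_devs) + metadata_size(1);

    printf("reserve %llu bytes before touching the chunk tree\n",
           (unsigned long long)thresh);
    return 0;
}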
@ -5188,6 +5167,24 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
|
|||
trans->bytes_reserved = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* To be called after all the new block groups attached to the transaction
|
||||
* handle have been created (btrfs_create_pending_block_groups()).
|
||||
*/
|
||||
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
|
||||
{
|
||||
struct btrfs_fs_info *fs_info = trans->root->fs_info;
|
||||
|
||||
if (!trans->chunk_bytes_reserved)
|
||||
return;
|
||||
|
||||
WARN_ON_ONCE(!list_empty(&trans->new_bgs));
|
||||
|
||||
block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
|
||||
trans->chunk_bytes_reserved);
|
||||
trans->chunk_bytes_reserved = 0;
|
||||
}
|
||||
|
||||
/* Can only return 0 or -ENOSPC */
|
||||
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
|
||||
struct inode *inode)
|
||||
|
@@ -6092,11 +6089,10 @@ static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,

 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                struct btrfs_root *root,
-               u64 bytenr, u64 num_bytes, u64 parent,
+               struct btrfs_delayed_ref_node *node, u64 parent,
                u64 root_objectid, u64 owner_objectid,
                u64 owner_offset, int refs_to_drop,
-               struct btrfs_delayed_extent_op *extent_op,
-               int no_quota)
+               struct btrfs_delayed_extent_op *extent_op)
 {
    struct btrfs_key key;
    struct btrfs_path *path;

@@ -6110,10 +6106,12 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
    int extent_slot = 0;
    int found_extent = 0;
    int num_to_del = 1;
+   int no_quota = node->no_quota;
    u32 item_size;
    u64 refs;
+   u64 bytenr = node->bytenr;
+   u64 num_bytes = node->num_bytes;
    int last_ref = 0;
-   enum btrfs_qgroup_operation_type type = BTRFS_QGROUP_OPER_SUB_EXCL;
    bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
                         SKINNY_METADATA);

@@ -6294,7 +6292,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
    refs -= refs_to_drop;

    if (refs > 0) {
-       type = BTRFS_QGROUP_OPER_SUB_SHARED;
        if (extent_op)
            __run_delayed_extent_op(extent_op, leaf, ei);
        /*

@@ -6356,18 +6353,6 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
    }
    btrfs_release_path(path);

-   /* Deal with the quota accounting */
-   if (!ret && last_ref && !no_quota) {
-       int mod_seq = 0;
-
-       if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
-           type == BTRFS_QGROUP_OPER_SUB_SHARED)
-           mod_seq = 1;
-
-       ret = btrfs_qgroup_record_ref(trans, info, root_objectid,
-                         bytenr, num_bytes, type,
-                         mod_seq);
-   }
out:
    btrfs_free_path(path);
    return ret;
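The signature change above replaces a growing list of scalar arguments with the delayed-ref node the caller already holds, which carries bytenr, num_bytes and no_quota itself. A generic, standalone C sketch of that refactor pattern (hypothetical names, not btrfs code):

    #include <stdint.h>
    #include <stdio.h>

    /* Before: free_extent(bytenr, num_bytes, ..., no_quota)
     * After:  the fields travel inside the node the caller already has. */
    struct ref_node {
        uint64_t bytenr;
        uint64_t num_bytes;
        int no_quota;
    };

    static int free_extent(const struct ref_node *node)
    {
        /* unpack locally, as __btrfs_free_extent now does */
        uint64_t bytenr = node->bytenr;
        uint64_t num_bytes = node->num_bytes;

        printf("freeing %llu bytes at %llu (no_quota=%d)\n",
               (unsigned long long)num_bytes,
               (unsigned long long)bytenr, node->no_quota);
        return 0;
    }

    int main(void)
    {
        struct ref_node node = { .bytenr = 1048576, .num_bytes = 16384 };
        return free_extent(&node);
    }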
@@ -6393,7 +6378,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
        goto out_delayed_unlock;

    spin_lock(&head->lock);
-   if (rb_first(&head->ref_root))
+   if (!list_empty(&head->ref_list))
        goto out;

    if (head->extent_op) {
@@ -7303,13 +7288,6 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
    btrfs_mark_buffer_dirty(path->nodes[0]);
    btrfs_free_path(path);

-   /* Always set parent to 0 here since its exclusive anyway. */
-   ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
-                     ins->objectid, ins->offset,
-                     BTRFS_QGROUP_OPER_ADD_EXCL, 0);
-   if (ret)
-       return ret;
-
    ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
    if (ret) { /* -ENOENT, logic error */
        btrfs_err(fs_info, "update block group failed for %llu %llu",
@@ -7391,14 +7369,6 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
    btrfs_mark_buffer_dirty(leaf);
    btrfs_free_path(path);

-   if (!no_quota) {
-       ret = btrfs_qgroup_record_ref(trans, fs_info, root_objectid,
-                         ins->objectid, num_bytes,
-                         BTRFS_QGROUP_OPER_ADD_EXCL, 0);
-       if (ret)
-           return ret;
-   }
-
    ret = update_block_group(trans, root, ins->objectid, root->nodesize,
                 1);
    if (ret) { /* -ENOENT, logic error */
@@ -7755,12 +7725,18 @@ reada:
    wc->reada_slot = slot;
}

+/*
+ * TODO: Modify related function to add related node/leaf to dirty_extent_root,
+ * for later qgroup accounting.
+ *
+ * Currently, this function does nothing.
+ */
 static int account_leaf_items(struct btrfs_trans_handle *trans,
                  struct btrfs_root *root,
                  struct extent_buffer *eb)
 {
    int nr = btrfs_header_nritems(eb);
-   int i, extent_type, ret;
+   int i, extent_type;
    struct btrfs_key key;
    struct btrfs_file_extent_item *fi;
    u64 bytenr, num_bytes;

@@ -7783,13 +7759,6 @@ static int account_leaf_items(struct btrfs_trans_handle *trans,
            continue;

        num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
-
-       ret = btrfs_qgroup_record_ref(trans, root->fs_info,
-                         root->objectid,
-                         bytenr, num_bytes,
-                         BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
-       if (ret)
-           return ret;
    }
    return 0;
}
@@ -7858,6 +7827,8 @@ static int adjust_slots_upwards(struct btrfs_root *root,

 /*
  * root_eb is the subtree root and is locked before this function is called.
+ * TODO: Modify this function to mark all (including complete shared node)
+ * to dirty_extent_root to allow it to get accounted in qgroup.
  */
 static int account_shared_subtree(struct btrfs_trans_handle *trans,
                  struct btrfs_root *root,

@@ -7920,7 +7891,11 @@ walk_down:
            child_gen = btrfs_node_ptr_generation(eb, parent_slot);

            eb = read_tree_block(root, child_bytenr, child_gen);
-           if (!eb || !extent_buffer_uptodate(eb)) {
+           if (IS_ERR(eb)) {
+               ret = PTR_ERR(eb);
+               goto out;
+           } else if (!extent_buffer_uptodate(eb)) {
                free_extent_buffer(eb);
                ret = -EIO;
                goto out;
            }

@@ -7931,16 +7906,6 @@ walk_down:
            btrfs_tree_read_lock(eb);
            btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
            path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
-
-           ret = btrfs_qgroup_record_ref(trans, root->fs_info,
-                             root->objectid,
-                             child_bytenr,
-                             root->nodesize,
-                             BTRFS_QGROUP_OPER_SUB_SUBTREE,
-                             0);
-           if (ret)
-               goto out;
-
        }

        if (level == 0) {
@@ -8151,7 +8116,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
        if (reada && level == 1)
            reada_walk_down(trans, root, wc, path);
        next = read_tree_block(root, bytenr, generation);
-       if (!next || !extent_buffer_uptodate(next)) {
+       if (IS_ERR(next)) {
+           return PTR_ERR(next);
+       } else if (!extent_buffer_uptodate(next)) {
            free_extent_buffer(next);
            return -EIO;
        }
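Several hunks in this pull convert read_tree_block() callers from a NULL check to IS_ERR()/PTR_ERR(), because the function now reports failures through the pointer itself. The kernel encodes a small negative errno in the top of the address space; a self-contained userspace re-implementation of that convention (read_block is a hypothetical stand-in):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO   4095

    /* Userspace re-implementation of the kernel's pointer-error encoding. */
    static inline void *ERR_PTR(long error)
    {
        return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
        return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *read_block(int fail)   /* stand-in for read_tree_block() */
    {
        static char block[16];

        if (fail)
            return ERR_PTR(-EIO);
        return block;
    }

    int main(void)
    {
        void *eb = read_block(1);

        if (IS_ERR(eb)) {       /* the new calling convention */
            printf("error: %ld\n", PTR_ERR(eb));
            return 1;
        }
        return 0;
    }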
@@ -8533,24 +8500,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
                goto out_end_trans;
            }

-           /*
-            * Qgroup update accounting is run from
-            * delayed ref handling. This usually works
-            * out because delayed refs are normally the
-            * only way qgroup updates are added. However,
-            * we may have added updates during our tree
-            * walk so run qgroups here to make sure we
-            * don't lose any updates.
-            */
-           ret = btrfs_delayed_qgroup_accounting(trans,
-                                 root->fs_info);
-           if (ret)
-               printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
-                          "running qgroup updates "
-                          "during snapshot delete. "
-                          "Quota is out of sync, "
-                          "rescan required.\n", ret);
-
            btrfs_end_transaction_throttle(trans, tree_root);
            if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
                pr_debug("BTRFS: drop snapshot early exit\n");

@@ -8604,14 +8553,6 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
    }
    root_dropped = true;
out_end_trans:
-   ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
-   if (ret)
-       printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
-                  "running qgroup updates "
-                  "during snapshot delete. "
-                  "Quota is out of sync, "
-                  "rescan required.\n", ret);
-
    btrfs_end_transaction_throttle(trans, tree_root);
out_free:
    kfree(wc);
@@ -9562,6 +9503,19 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,

    free_excluded_extents(root, cache);

+   /*
+    * Call to ensure the corresponding space_info object is created and
+    * assigned to our block group, but don't update its counters just yet.
+    * We want our bg to be added to the rbtree with its ->space_info set.
+    */
+   ret = update_space_info(root->fs_info, cache->flags, 0, 0,
+               &cache->space_info);
+   if (ret) {
+       btrfs_remove_free_space_cache(cache);
+       btrfs_put_block_group(cache);
+       return ret;
+   }
+
    ret = btrfs_add_block_group_cache(root->fs_info, cache);
    if (ret) {
        btrfs_remove_free_space_cache(cache);

@@ -9569,6 +9523,10 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        return ret;
    }

+   /*
+    * Now that our block group has its ->space_info set and is inserted in
+    * the rbtree, update the space info's counters.
+    */
    ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                &cache->space_info);
    if (ret) {
fs/btrfs/extent_io.c:

@@ -1277,7 +1277,12 @@ int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
              unsigned bits, gfp_t mask)
 {
-   return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
+   int wake = 0;
+
+   if (bits & EXTENT_LOCKED)
+       wake = 1;
+
+   return clear_extent_bit(tree, start, end, bits, wake, 0, NULL, mask);
 }

 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,

@@ -4490,6 +4495,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
    }
    if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
        flags |= FIEMAP_EXTENT_ENCODED;
+   if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+       flags |= FIEMAP_EXTENT_UNWRITTEN;

    free_extent_map(em);
    em = NULL;
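With the extent_fiemap() hunk above, preallocated extents are reported to userspace with FIEMAP_EXTENT_UNWRITTEN. A minimal userspace check, assuming a file path on the command line and with error handling trimmed to the essentials (FS_IOC_FIEMAP and the fiemap structures are the real kernel UAPI):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>
    #include <linux/fiemap.h>

    int main(int argc, char **argv)
    {
        struct fiemap *fm;
        int fd, i;

        if (argc != 2)
            return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
            return 1;

        /* room for up to 32 extent records after the header */
        fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
        fm->fm_length = ~0ULL;      /* map the whole file */
        fm->fm_extent_count = 32;

        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0)
            return 1;

        for (i = 0; i < fm->fm_mapped_extents; i++) {
            struct fiemap_extent *fe = &fm->fm_extents[i];

            printf("extent %d: logical=%llu len=%llu%s\n", i,
                   (unsigned long long)fe->fe_logical,
                   (unsigned long long)fe->fe_length,
                   (fe->fe_flags & FIEMAP_EXTENT_UNWRITTEN) ?
                   " (unwritten)" : "");
        }
        return 0;
    }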
fs/btrfs/file.c:

@@ -1868,6 +1868,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
    struct btrfs_log_ctx ctx;
    int ret = 0;
    bool full_sync = 0;
+   const u64 len = end - start + 1;

    trace_btrfs_sync_file(file, datasync);

@@ -1896,7 +1897,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         * all extents are persisted and the respective file extent
         * items are in the fs/subvol btree.
         */
-       ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
+       ret = btrfs_wait_ordered_range(inode, start, len);
    } else {
        /*
         * Start any new ordered operations before starting to log the

@@ -1968,8 +1969,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
     */
    smp_mb();
    if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
-       (full_sync && BTRFS_I(inode)->last_trans <=
-        root->fs_info->last_trans_committed)) {
+       (BTRFS_I(inode)->last_trans <=
+        root->fs_info->last_trans_committed &&
+        (full_sync ||
+         !btrfs_have_ordered_extents_in_range(inode, start, len)))) {
        /*
         * We've had everything committed since the last time we were
         * modified so clear this flag in case it was set for whatever
fs/btrfs/free-space-cache.c:

@@ -231,6 +231,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
 {
    int ret = 0;
    struct btrfs_path *path = btrfs_alloc_path();
+   bool locked = false;

    if (!path) {
        ret = -ENOMEM;

@@ -238,6 +239,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
    }

    if (block_group) {
+       locked = true;
        mutex_lock(&trans->transaction->cache_write_mutex);
        if (!list_empty(&block_group->io_list)) {
            list_del_init(&block_group->io_list);

@@ -269,18 +271,14 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
     */
    ret = btrfs_truncate_inode_items(trans, root, inode,
                     0, BTRFS_EXTENT_DATA_KEY);
-   if (ret) {
-       mutex_unlock(&trans->transaction->cache_write_mutex);
-       btrfs_abort_transaction(trans, root, ret);
-       return ret;
-   }
+   if (ret)
+       goto fail;

    ret = btrfs_update_inode(trans, root, inode);

-   if (block_group)
-       mutex_unlock(&trans->transaction->cache_write_mutex);
-
+fail:
+   if (locked)
+       mutex_unlock(&trans->transaction->cache_write_mutex);
+   if (ret)
+       btrfs_abort_transaction(trans, root, ret);
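The free-space-cache fix above is an instance of the single-exit cleanup idiom: record whether the lock was taken, route every error through one label, and undo the lock exactly once there. A standalone sketch of the same shape (pthread mutex standing in for cache_write_mutex, hypothetical names):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t cache_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int do_step(int fail)
    {
        return fail ? -1 : 0;
    }

    static int truncate_cache(bool have_group, int fail_step)
    {
        bool locked = false;
        int ret;

        if (have_group) {
            locked = true;
            pthread_mutex_lock(&cache_mutex);
        }

        ret = do_step(fail_step);
        if (ret)
            goto fail;  /* all error paths funnel through one label */

        ret = do_step(0);

    fail:
        if (locked)
            pthread_mutex_unlock(&cache_mutex); /* undone exactly once */
        if (ret)
            fprintf(stderr, "aborting: %d\n", ret);
        return ret;
    }

    int main(void)
    {
        truncate_cache(true, 1);
        return 0;
    }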
fs/btrfs/inode.c:

@@ -4986,24 +4986,40 @@ static void evict_inode_truncate_pages(struct inode *inode)
    }
    write_unlock(&map_tree->lock);

+   /*
+    * Keep looping until we have no more ranges in the io tree.
+    * We can have ongoing bios started by readpages (called from readahead)
+    * that didn't get their end io callbacks called yet or they are still
+    * in progress (extent_io.c:end_bio_extent_readpage()). This means some
+    * ranges can still be locked and eviction started because before
+    * submitting those bios, which are executed by a separate task (work
+    * queue kthread), inode references (inode->i_count) were not taken
+    * (which would be dropped in the end io callback of each bio).
+    * Therefore here we effectively end up waiting for those bios and
+    * anyone else holding locked ranges without having bumped the inode's
+    * reference count - if we don't do it, when they access the inode's
+    * io_tree to unlock a range it may be too late, leading to a
+    * use-after-free issue.
+    */
    spin_lock(&io_tree->lock);
    while (!RB_EMPTY_ROOT(&io_tree->state)) {
        struct extent_state *state;
        struct extent_state *cached_state = NULL;
+       u64 start;
+       u64 end;

        node = rb_first(&io_tree->state);
        state = rb_entry(node, struct extent_state, rb_node);
+       atomic_inc(&state->refs);
+       start = state->start;
+       end = state->end;
        spin_unlock(&io_tree->lock);

-       lock_extent_bits(io_tree, state->start, state->end,
-                0, &cached_state);
-       clear_extent_bit(io_tree, state->start, state->end,
+       lock_extent_bits(io_tree, start, end, 0, &cached_state);
+       clear_extent_bit(io_tree, start, end,
                 EXTENT_LOCKED | EXTENT_DIRTY |
                 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
                 EXTENT_DEFRAG, 1, 1,
                 &cached_state, GFP_NOFS);
+       free_extent_state(state);

        cond_resched();
        spin_lock(&io_tree->lock);
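The fix above takes a reference and snapshots state->start/state->end before dropping the spinlock, because once the lock is gone another task may free or mutate the state. The same pattern in a generic, self-contained form (pthread mutex in place of the spinlock; the refcount manipulation here only mimics atomic_inc()/free_extent_state(), it is not the kernel logic):

    #include <pthread.h>
    #include <stdio.h>

    struct state {
        pthread_mutex_t lock;
        int refs;
        unsigned long start;
        unsigned long end;
    };

    /* Take a reference and snapshot the fields under the lock; work on the
     * snapshot after unlocking, never on the shared object itself. */
    static void process(struct state *s)
    {
        unsigned long start, end;

        pthread_mutex_lock(&s->lock);
        s->refs++;          /* like atomic_inc(&state->refs) */
        start = s->start;   /* snapshot while still protected */
        end = s->end;
        pthread_mutex_unlock(&s->lock);

        /* safe: uses the copies, not s->start/s->end */
        printf("range [%lu, %lu]\n", start, end);

        pthread_mutex_lock(&s->lock);
        s->refs--;          /* like free_extent_state(state) */
        pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        struct state s = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 4095 };

        process(&s);
        return 0;
    }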
fs/btrfs/ioctl.c:

@@ -553,8 +553,8 @@ static noinline int create_subvol(struct inode *dir,
    key.offset = (u64)-1;
    new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
    if (IS_ERR(new_root)) {
-       btrfs_abort_transaction(trans, root, PTR_ERR(new_root));
        ret = PTR_ERR(new_root);
+       btrfs_abort_transaction(trans, root, ret);
        goto fail;
    }
@@ -1318,7 +1318,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
        i = range->start >> PAGE_CACHE_SHIFT;
    }
    if (!max_to_defrag)
-       max_to_defrag = last_index + 1;
+       max_to_defrag = last_index - i + 1;

    /*
     * make writeback start from i, so the defrag range can be

@@ -1368,7 +1368,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
            ra_index = max(i, ra_index);
            btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
                       cluster);
-           ra_index += max_cluster;
+           ra_index += cluster;
        }

        mutex_lock(&inode->i_mutex);
@@ -2271,10 +2271,7 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file,
 {
    struct btrfs_ioctl_ino_lookup_args *args;
    struct inode *inode;
-   int ret;
-
-   if (!capable(CAP_SYS_ADMIN))
-       return -EPERM;
+   int ret = 0;

    args = memdup_user(argp, sizeof(*args));
    if (IS_ERR(args))

@@ -2282,13 +2279,28 @@ static noinline int btrfs_ioctl_ino_lookup(struct file *file,

    inode = file_inode(file);

+   /*
+    * Unprivileged query to obtain the containing subvolume root id. The
+    * path is reset so it's consistent with btrfs_search_path_in_tree.
+    */
    if (args->treeid == 0)
        args->treeid = BTRFS_I(inode)->root->root_key.objectid;

+   if (args->objectid == BTRFS_FIRST_FREE_OBJECTID) {
+       args->name[0] = 0;
+       goto out;
+   }
+
+   if (!capable(CAP_SYS_ADMIN)) {
+       ret = -EPERM;
+       goto out;
+   }
+
    ret = btrfs_search_path_in_tree(BTRFS_I(inode)->root->fs_info,
                    args->treeid, args->objectid,
                    args->name);

+out:
    if (ret == 0 && copy_to_user(argp, args, sizeof(*args)))
        ret = -EFAULT;
@@ -2413,8 +2425,6 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
        goto out_unlock_inode;
    }

-   d_invalidate(dentry);
-
    down_write(&root->fs_info->subvol_sem);

    err = may_destroy_subvol(dest);

@@ -2508,7 +2518,7 @@ out_up_write:
out_unlock_inode:
    mutex_unlock(&inode->i_mutex);
    if (!err) {
-       shrink_dcache_sb(root->fs_info->sb);
+       d_invalidate(dentry);
        btrfs_invalidate_inodes(dest);
        d_delete(dentry);
        ASSERT(dest->send_in_progress == 0);
@@ -2879,12 +2889,19 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst,
    return ret;
}

-static int extent_same_check_offsets(struct inode *inode, u64 off, u64 len)
+static int extent_same_check_offsets(struct inode *inode, u64 off, u64 *plen,
+                    u64 olen)
 {
+   u64 len = *plen;
    u64 bs = BTRFS_I(inode)->root->fs_info->sb->s_blocksize;

-   if (off + len > inode->i_size || off + len < off)
+   if (off + olen > inode->i_size || off + olen < off)
        return -EINVAL;

+   /* if we extend to eof, continue to block boundary */
+   if (off + len == inode->i_size)
+       *plen = len = ALIGN(inode->i_size, bs) - off;
+
    /* Check that we are block aligned - btrfs_clone() requires this */
    if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs))
        return -EINVAL;

@@ -2892,10 +2909,11 @@ static int extent_same_check_offsets(struct inode *inode, u64 off, u64 len)
    return 0;
}

-static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
+static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                 struct inode *dst, u64 dst_loff)
 {
    int ret;
+   u64 len = olen;

    /*
     * btrfs_clone() can't handle extents in the same file

@@ -2910,11 +2928,11 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,

    btrfs_double_lock(src, loff, dst, dst_loff, len);

-   ret = extent_same_check_offsets(src, loff, len);
+   ret = extent_same_check_offsets(src, loff, &len, olen);
    if (ret)
        goto out_unlock;

-   ret = extent_same_check_offsets(dst, dst_loff, len);
+   ret = extent_same_check_offsets(dst, dst_loff, &len, olen);
    if (ret)
        goto out_unlock;

@@ -2927,7 +2945,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,

    ret = btrfs_cmp_data(src, loff, dst, dst_loff, len);
    if (ret == 0)
-       ret = btrfs_clone(src, dst, loff, len, len, dst_loff);
+       ret = btrfs_clone(src, dst, loff, olen, len, dst_loff);

out_unlock:
    btrfs_double_unlock(src, loff, dst, dst_loff, len);
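The EOF case above rounds the dedup length up to a block boundary: for i_size = 10000, a 4096-byte block size and off = 8192, ALIGN(10000, 4096) is 12288, so len becomes 4096. The arithmetic, standalone:

    #include <stdio.h>

    /* Kernel-style ALIGN: round x up to a power-of-two boundary a. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long long i_size = 10000, bs = 4096, off = 8192;
        unsigned long long len;

        /* the off + len == i_size branch in extent_same_check_offsets() */
        len = ALIGN(i_size, bs) - off;

        printf("len = %llu\n", len);    /* 12288 - 8192 = 4096 */
        return 0;
    }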
fs/btrfs/ordered-data.c:

@@ -198,9 +198,6 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
    entry->file_offset = file_offset;
    entry->start = start;
    entry->len = len;
-   if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
-       !(type == BTRFS_ORDERED_NOCOW))
-       entry->csum_bytes_left = disk_len;
    entry->disk_len = disk_len;
    entry->bytes_left = len;
    entry->inode = igrab(inode);

@@ -286,10 +283,6 @@ void btrfs_add_ordered_sum(struct inode *inode,
    tree = &BTRFS_I(inode)->ordered_tree;
    spin_lock_irq(&tree->lock);
    list_add_tail(&sum->list, &entry->list);
-   WARN_ON(entry->csum_bytes_left < sum->len);
-   entry->csum_bytes_left -= sum->len;
-   if (entry->csum_bytes_left == 0)
-       wake_up(&entry->wait);
    spin_unlock_irq(&tree->lock);
}

@@ -509,7 +502,21 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
            wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                               &ordered->flags));

-           list_add_tail(&ordered->trans_list, &trans->ordered);
+           /*
+            * If our ordered extent completed it means it updated the
+            * fs/subvol and csum trees already, so no need to make the
+            * current transaction's commit wait for it, as we end up
+            * holding memory unnecessarily and delaying the inode's iput
+            * until the transaction commit (we schedule an iput for the
+            * inode when the ordered extent's refcount drops to 0), which
+            * prevents it from being evictable until the transaction
+            * commits.
+            */
+           if (test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags))
+               btrfs_put_ordered_extent(ordered);
+           else
+               list_add_tail(&ordered->trans_list, &trans->ordered);

            spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);

@@ -844,6 +851,20 @@ out:
    return entry;
}

+bool btrfs_have_ordered_extents_in_range(struct inode *inode,
+                    u64 file_offset,
+                    u64 len)
+{
+   struct btrfs_ordered_extent *oe;
+
+   oe = btrfs_lookup_ordered_range(inode, file_offset, len);
+   if (oe) {
+       btrfs_put_ordered_extent(oe);
+       return true;
+   }
+   return false;
+}
+
 /*
  * lookup and return any extent before 'file_offset'. NULL is returned
  * if none is found
fs/btrfs/ordered-data.h:

@@ -89,9 +89,6 @@ struct btrfs_ordered_extent {
    /* number of bytes that still need writing */
    u64 bytes_left;

-   /* number of bytes that still need csumming */
-   u64 csum_bytes_left;
-
    /*
     * the end of the ordered extent which is behind it but
     * didn't update disk_i_size. Please see the comment of

@@ -191,6 +188,9 @@ btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
 struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
                            u64 file_offset,
                            u64 len);
+bool btrfs_have_ordered_extents_in_range(struct inode *inode,
+                    u64 file_offset,
+                    u64 len);
 int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
fs/btrfs/qgroup.c (1088 lines changed; diff not shown because of its large size)
fs/btrfs/qgroup.h:

@@ -19,43 +19,18 @@
 #ifndef __BTRFS_QGROUP__
 #define __BTRFS_QGROUP__

-/*
- * A description of the operations, all of these operations only happen when we
- * are adding the 1st reference for that subvolume in the case of adding space
- * or on the last reference delete in the case of subtraction. The only
- * exception is the last one, which is added for confusion.
- *
- * BTRFS_QGROUP_OPER_ADD_EXCL: adding bytes where this subvolume is the only
- * one pointing at the bytes we are adding. This is called on the first
- * allocation.
- *
- * BTRFS_QGROUP_OPER_ADD_SHARED: adding bytes where this bytenr is going to be
- * shared between subvols. This is called on the creation of a ref that already
- * has refs from a different subvolume, so basically reflink.
- *
- * BTRFS_QGROUP_OPER_SUB_EXCL: removing bytes where this subvolume is the only
- * one referencing the range.
- *
- * BTRFS_QGROUP_OPER_SUB_SHARED: removing bytes where this subvolume shares with
- * refs with other subvolumes.
- */
-enum btrfs_qgroup_operation_type {
-   BTRFS_QGROUP_OPER_ADD_EXCL,
-   BTRFS_QGROUP_OPER_ADD_SHARED,
-   BTRFS_QGROUP_OPER_SUB_EXCL,
-   BTRFS_QGROUP_OPER_SUB_SHARED,
-   BTRFS_QGROUP_OPER_SUB_SUBTREE,
-};
+#include "ulist.h"
+#include "delayed-ref.h"

-struct btrfs_qgroup_operation {
-   u64 ref_root;
-   u64 bytenr;
-   u64 num_bytes;
-   u64 seq;
-   enum btrfs_qgroup_operation_type type;
-   struct seq_list elem;
-   struct rb_node n;
-   struct list_head list;
-};
+/*
+ * Record a dirty extent, and info qgroup to update quota on it
+ * TODO: Use kmem cache to alloc it.
+ */
+struct btrfs_qgroup_extent_record {
+   struct rb_node node;
+   u64 bytenr;
+   u64 num_bytes;
+   struct ulist *old_roots;
+};

 int btrfs_quota_enable(struct btrfs_trans_handle *trans,

@@ -79,16 +54,18 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
 int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info);
 void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
 struct btrfs_delayed_extent_op;
-int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
-               struct btrfs_fs_info *fs_info, u64 ref_root,
-               u64 bytenr, u64 num_bytes,
-               enum btrfs_qgroup_operation_type type,
-               int mod_seq);
-int btrfs_delayed_qgroup_accounting(struct btrfs_trans_handle *trans,
-                   struct btrfs_fs_info *fs_info);
-void btrfs_remove_qgroup_operation(struct btrfs_trans_handle *trans,
-                  struct btrfs_fs_info *fs_info,
-                  struct btrfs_qgroup_operation *oper);
+int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
+                    struct btrfs_fs_info *fs_info);
+struct btrfs_qgroup_extent_record
+*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
+                 struct btrfs_qgroup_extent_record *record);
+int
+btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
+               struct btrfs_fs_info *fs_info,
+               u64 bytenr, u64 num_bytes,
+               struct ulist *old_roots, struct ulist *new_roots);
+int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
+                struct btrfs_fs_info *fs_info);
 int btrfs_run_qgroups(struct btrfs_trans_handle *trans,
              struct btrfs_fs_info *fs_info);
 int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
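The interface change above is the heart of the extent-oriented qgroup rework: instead of classifying each ref operation up front, an extent is accounted from the set of roots referencing it before (old_roots) and after (new_roots) the transaction. The counting rule, one referencing root means exclusive, several mean shared, can be modelled in a few lines. This is a toy illustration of the idea, not the kernel's accounting code:

    #include <stdio.h>

    /* Toy model: an extent's bytes count as exclusive for a root when that
     * root is the only one referencing it, otherwise as shared. */
    static void account(const char *when, unsigned long long num_bytes,
                int nr_roots)
    {
        if (nr_roots == 0)
            printf("%s: extent unreferenced, no charge\n", when);
        else if (nr_roots == 1)
            printf("%s: %llu bytes exclusive\n", when, num_bytes);
        else
            printf("%s: %llu bytes shared by %d roots\n", when,
                   num_bytes, nr_roots);
    }

    int main(void)
    {
        /* a reflink adds a second referencing root: exclusive -> shared */
        account("old_roots", 16384, 1);
        account("new_roots", 16384, 2);
        return 0;
    }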
fs/btrfs/relocation.c:

@@ -1847,8 +1847,10 @@ again:
    }

    eb = read_tree_block(dest, old_bytenr, old_ptr_gen);
-   if (!eb || !extent_buffer_uptodate(eb)) {
-       ret = (!eb) ? -ENOMEM : -EIO;
+   if (IS_ERR(eb)) {
+       ret = PTR_ERR(eb);
+   } else if (!extent_buffer_uptodate(eb)) {
+       ret = -EIO;
        free_extent_buffer(eb);
        break;
    }

@@ -2002,7 +2004,9 @@ int walk_down_reloc_tree(struct btrfs_root *root, struct btrfs_path *path,

        bytenr = btrfs_node_blockptr(eb, path->slots[i]);
        eb = read_tree_block(root, bytenr, ptr_gen);
-       if (!eb || !extent_buffer_uptodate(eb)) {
+       if (IS_ERR(eb)) {
+           return PTR_ERR(eb);
+       } else if (!extent_buffer_uptodate(eb)) {
            free_extent_buffer(eb);
            return -EIO;
        }

@@ -2710,7 +2714,10 @@ static int do_relocation(struct btrfs_trans_handle *trans,
            blocksize = root->nodesize;
            generation = btrfs_node_ptr_generation(upper->eb, slot);
            eb = read_tree_block(root, bytenr, generation);
-           if (!eb || !extent_buffer_uptodate(eb)) {
+           if (IS_ERR(eb)) {
+               err = PTR_ERR(eb);
+               goto next;
+           } else if (!extent_buffer_uptodate(eb)) {
                free_extent_buffer(eb);
                err = -EIO;
                goto next;

@@ -2873,7 +2880,9 @@ static int get_tree_block_key(struct reloc_control *rc,
    BUG_ON(block->key_ready);
    eb = read_tree_block(rc->extent_root, block->bytenr,
                 block->key.offset);
-   if (!eb || !extent_buffer_uptodate(eb)) {
+   if (IS_ERR(eb)) {
+       return PTR_ERR(eb);
+   } else if (!extent_buffer_uptodate(eb)) {
        free_extent_buffer(eb);
        return -EIO;
    }
fs/btrfs/scrub.c:

@@ -2662,18 +2662,30 @@ static void scrub_free_parity(struct scrub_parity *sparity)
    kfree(sparity);
}

+static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
+{
+   struct scrub_parity *sparity = container_of(work, struct scrub_parity,
+                           work);
+   struct scrub_ctx *sctx = sparity->sctx;
+
+   scrub_free_parity(sparity);
+   scrub_pending_bio_dec(sctx);
+}
+
 static void scrub_parity_bio_endio(struct bio *bio, int error)
 {
    struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
-   struct scrub_ctx *sctx = sparity->sctx;

    if (error)
        bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
              sparity->nsectors);

-   scrub_free_parity(sparity);
-   scrub_pending_bio_dec(sctx);
    bio_put(bio);
+
+   btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
+           scrub_parity_bio_endio_worker, NULL, NULL);
+   btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
+            &sparity->work);
 }

 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)

@@ -3589,6 +3601,13 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
            ret = -ENOMEM;
            goto out;
        }
+       fs_info->scrub_parity_workers =
+           btrfs_alloc_workqueue("btrfs-scrubparity", flags,
+                         max_active, 2);
+       if (!fs_info->scrub_parity_workers) {
+           ret = -ENOMEM;
+           goto out;
+       }
    }
    ++fs_info->scrub_workers_refcnt;
out:

@@ -3601,6 +3620,7 @@ static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
        btrfs_destroy_workqueue(fs_info->scrub_workers);
        btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
        btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
+       btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
    }
    WARN_ON(fs_info->scrub_workers_refcnt < 0);
}
fs/btrfs/send.c (147 lines changed):

@@ -243,6 +243,7 @@ struct waiting_dir_move {
     * after this directory is moved, we can try to rmdir the ino rmdir_ino.
     */
    u64 rmdir_ino;
+   bool orphanized;
};

 struct orphan_dir_info {

@@ -1158,6 +1159,9 @@ struct backref_ctx {
    /* may be truncated in case it's the last extent in a file */
    u64 extent_len;

+   /* data offset in the file extent item */
+   u64 data_offset;
+
    /* Just to check for bugs in backref resolving */
    int found_itself;
};

@@ -1221,7 +1225,7 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
    if (ret < 0)
        return ret;

-   if (offset + bctx->extent_len > i_size)
+   if (offset + bctx->data_offset + bctx->extent_len > i_size)
        return 0;

    /*

@@ -1363,6 +1367,19 @@ static int find_extent_clone(struct send_ctx *sctx,
    backref_ctx->cur_offset = data_offset;
    backref_ctx->found_itself = 0;
    backref_ctx->extent_len = num_bytes;
+   /*
+    * For non-compressed extents iterate_extent_inodes() gives us extent
+    * offsets that already take into account the data offset, but not for
+    * compressed extents, since the offset is logical and not relative to
+    * the physical extent locations. We must take this into account to
+    * avoid sending clone offsets that go beyond the source file's size,
+    * which would result in the clone ioctl failing with -EINVAL on the
+    * receiving end.
+    */
+   if (compressed == BTRFS_COMPRESS_NONE)
+       backref_ctx->data_offset = 0;
+   else
+       backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);

    /*
     * The last extent of a file may be too large due to page alignment.

@@ -1900,8 +1917,13 @@ static int did_overwrite_ref(struct send_ctx *sctx,
        goto out;
    }

-   /* we know that it is or will be overwritten. check this now */
-   if (ow_inode < sctx->send_progress)
+   /*
+    * We know that it is or will be overwritten. Check this now.
+    * The current inode being processed might have been the one that caused
+    * inode 'ino' to be orphanized, therefore ow_inode can actually be the
+    * same as sctx->send_progress.
+    */
+   if (ow_inode <= sctx->send_progress)
        ret = 1;
    else
        ret = 0;

@@ -2223,6 +2245,8 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
    fs_path_reset(dest);

    while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
+       struct waiting_dir_move *wdm;
+
        fs_path_reset(name);

        if (is_waiting_for_rm(sctx, ino)) {

@@ -2233,7 +2257,11 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
            break;
        }

-       if (is_waiting_for_move(sctx, ino)) {
+       wdm = get_waiting_dir_move(sctx, ino);
+       if (wdm && wdm->orphanized) {
+           ret = gen_unique_name(sctx, ino, gen, name);
+           stop = 1;
+       } else if (wdm) {
            ret = get_first_ref(sctx->parent_root, ino,
                        &parent_inode, &parent_gen, name);
        } else {

@@ -2328,8 +2356,12 @@ static int send_subvol_begin(struct send_ctx *sctx)
    TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
            le64_to_cpu(sctx->send_root->root_item.ctransid));
    if (parent_root) {
-       TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
-                sctx->parent_root->root_item.uuid);
+       if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
+           TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
+                    parent_root->root_item.received_uuid);
+       else
+           TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
+                    parent_root->root_item.uuid);
        TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
                le64_to_cpu(sctx->parent_root->root_item.ctransid));
    }

@@ -2923,7 +2955,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
    return entry != NULL;
}

-static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
+static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
 {
    struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
    struct rb_node *parent = NULL;

@@ -2934,6 +2966,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
        return -ENOMEM;
    dm->ino = ino;
    dm->rmdir_ino = 0;
+   dm->orphanized = orphanized;

    while (*p) {
        parent = *p;

@@ -3030,7 +3063,7 @@ static int add_pending_dir_move(struct send_ctx *sctx,
        goto out;
    }

-   ret = add_waiting_dir_move(sctx, pm->ino);
+   ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
    if (ret)
        goto out;

@@ -3353,8 +3386,40 @@ out:
    return ret;
}

+/*
+ * Check if inode ino1 is an ancestor of inode ino2 in the given root.
+ * Return 1 if true, 0 if false and < 0 on error.
+ */
+static int is_ancestor(struct btrfs_root *root,
+              const u64 ino1,
+              const u64 ino1_gen,
+              const u64 ino2,
+              struct fs_path *fs_path)
+{
+   u64 ino = ino2;
+
+   while (ino > BTRFS_FIRST_FREE_OBJECTID) {
+       int ret;
+       u64 parent;
+       u64 parent_gen;
+
+       fs_path_reset(fs_path);
+       ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
+       if (ret < 0) {
+           if (ret == -ENOENT && ino == ino2)
+               ret = 0;
+           return ret;
+       }
+       if (parent == ino1)
+           return parent_gen == ino1_gen ? 1 : 0;
+       ino = parent;
+   }
+   return 0;
+}
+
 static int wait_for_parent_move(struct send_ctx *sctx,
-               struct recorded_ref *parent_ref)
+               struct recorded_ref *parent_ref,
+               const bool is_orphan)
 {
    int ret = 0;
    u64 ino = parent_ref->dir;

@@ -3374,11 +3439,24 @@ static int wait_for_parent_move(struct send_ctx *sctx,
     * Our current directory inode may not yet be renamed/moved because some
     * ancestor (immediate or not) has to be renamed/moved first. So find if
     * such ancestor exists and make sure our own rename/move happens after
-    * that ancestor is processed.
+    * that ancestor is processed to avoid path build infinite loops (done
+    * at get_cur_path()).
     */
    while (ino > BTRFS_FIRST_FREE_OBJECTID) {
        if (is_waiting_for_move(sctx, ino)) {
-           ret = 1;
+           /*
+            * If the current inode is an ancestor of ino in the
+            * parent root, we need to delay the rename of the
+            * current inode, otherwise don't delay the rename
+            * because we can end up with a circular dependency
+            * of renames, resulting in some directories never
+            * getting the respective rename operations issued in
+            * the send stream or getting into infinite path build
+            * loops.
+            */
+           ret = is_ancestor(sctx->parent_root,
+                     sctx->cur_ino, sctx->cur_inode_gen,
+                     ino, path_before);
            break;
        }

@@ -3420,7 +3498,7 @@ out:
                       ino,
                       &sctx->new_refs,
                       &sctx->deleted_refs,
-                      false);
+                      is_orphan);
        if (!ret)
            ret = 1;
    }

@@ -3589,6 +3667,17 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
        }
    }

+   if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
+       can_rename) {
+       ret = wait_for_parent_move(sctx, cur, is_orphan);
+       if (ret < 0)
+           goto out;
+       if (ret == 1) {
+           can_rename = false;
+           *pending_move = 1;
+       }
+   }
+
    /*
     * link/move the ref to the new place. If we have an orphan
     * inode, move it and update valid_path. If not, link or move

@@ -3609,18 +3698,11 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
                 * dirs, we always have one new and one deleted
                 * ref. The deleted ref is ignored later.
                 */
-               ret = wait_for_parent_move(sctx, cur);
-               if (ret < 0)
-                   goto out;
-               if (ret) {
-                   *pending_move = 1;
-               } else {
-                   ret = send_rename(sctx, valid_path,
-                             cur->full_path);
-                   if (!ret)
-                       ret = fs_path_copy(valid_path,
-                                  cur->full_path);
-               }
+               ret = send_rename(sctx, valid_path,
+                         cur->full_path);
+               if (!ret)
+                   ret = fs_path_copy(valid_path,
+                              cur->full_path);
                if (ret < 0)
                    goto out;
            } else {

@@ -4508,8 +4590,21 @@ verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
    if (ret < 0)
        goto out;

-   TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
-            clone_root->root->root_item.uuid);
+   /*
+    * If the parent we're using has a received_uuid set then use that as
+    * our clone source as that is what we will look for when doing a
+    * receive.
+    *
+    * This covers the case that we create a snapshot off of a received
+    * subvolume and then use that as the parent and try to receive on a
+    * different host.
+    */
+   if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
+       TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
+                clone_root->root->root_item.received_uuid);
+   else
+       TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
+                clone_root->root->root_item.uuid);
    TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
            le64_to_cpu(clone_root->root->root_item.ctransid));
    TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
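The new is_ancestor() above is a plain climb along first parent references. The same walk over a toy parent table, self-contained (the inode numbers and parent_of table are invented for illustration; 256 mirrors BTRFS_FIRST_FREE_OBJECTID):

    #include <stdint.h>
    #include <stdio.h>

    #define FIRST_FREE_OBJECTID 256 /* as in btrfs: inode numbers start here */

    /* toy parent table, not btrfs metadata */
    static uint64_t parent_of(uint64_t ino)
    {
        switch (ino) {
        case 260: return 258;
        case 258: return 257;
        case 257: return 256;
        default:  return 0;
        }
    }

    /* Same shape as is_ancestor(): climb from ino2, stop at the tree root. */
    static int is_ancestor(uint64_t ino1, uint64_t ino2)
    {
        uint64_t ino = ino2;

        while (ino > FIRST_FREE_OBJECTID) {
            uint64_t p = parent_of(ino);

            if (p == ino1)
                return 1;
            ino = p;
        }
        return 0;
    }

    int main(void)
    {
        printf("%d\n", is_ancestor(257, 260)); /* 1: 257 is above 260 */
        printf("%d\n", is_ancestor(260, 257)); /* 0 */
        return 0;
    }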
fs/btrfs/super.c (409 lines changed):

@@ -135,6 +135,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
 * __btrfs_std_error decodes expected errors from the caller and
 * invokes the appropriate error response.
 */
+__cold
 void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
               unsigned int line, int errno, const char *fmt, ...)
 {

@@ -247,18 +248,11 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
 * We'll complete the cleanup in btrfs_end_transaction and
 * btrfs_commit_transaction.
 */
+__cold
 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
                   struct btrfs_root *root, const char *function,
                   unsigned int line, int errno)
 {
-   /*
-    * Report first abort since mount
-    */
-   if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,
-                 &root->fs_info->fs_state)) {
-       WARN(1, KERN_DEBUG "BTRFS: Transaction aborted (error %d)\n",
-            errno);
-   }
    trans->aborted = errno;
    /* Nothing used. The other threads that have joined this
     * transaction may be able to continue. */

@@ -281,6 +275,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
 * __btrfs_panic decodes unexpected, fatal errors from the caller,
 * issues an alert, and either panics or BUGs, depending on mount options.
 */
+__cold
 void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
           unsigned int line, int errno, const char *fmt, ...)
 {

@@ -841,33 +836,153 @@ out:
    return error;
}

-static struct dentry *get_default_root(struct super_block *sb,
-                      u64 subvol_objectid)
+static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
+                      u64 subvol_objectid)
 {
+   struct btrfs_root *root = fs_info->tree_root;
+   struct btrfs_root *fs_root;
+   struct btrfs_root_ref *root_ref;
+   struct btrfs_inode_ref *inode_ref;
+   struct btrfs_key key;
+   struct btrfs_path *path = NULL;
+   char *name = NULL, *ptr;
+   u64 dirid;
+   int len;
+   int ret;
+
+   path = btrfs_alloc_path();
+   if (!path) {
+       ret = -ENOMEM;
+       goto err;
+   }
+   path->leave_spinning = 1;
+
+   name = kmalloc(PATH_MAX, GFP_NOFS);
+   if (!name) {
+       ret = -ENOMEM;
+       goto err;
+   }
+   ptr = name + PATH_MAX - 1;
+   ptr[0] = '\0';
+
+   /*
+    * Walk up the subvolume trees in the tree of tree roots by root
+    * backrefs until we hit the top-level subvolume.
+    */
+   while (subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
+       key.objectid = subvol_objectid;
+       key.type = BTRFS_ROOT_BACKREF_KEY;
+       key.offset = (u64)-1;
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0) {
+           goto err;
+       } else if (ret > 0) {
+           ret = btrfs_previous_item(root, path, subvol_objectid,
+                         BTRFS_ROOT_BACKREF_KEY);
+           if (ret < 0) {
+               goto err;
+           } else if (ret > 0) {
+               ret = -ENOENT;
+               goto err;
+           }
+       }
+
+       btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+       subvol_objectid = key.offset;
+
+       root_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                     struct btrfs_root_ref);
+       len = btrfs_root_ref_name_len(path->nodes[0], root_ref);
+       ptr -= len + 1;
+       if (ptr < name) {
+           ret = -ENAMETOOLONG;
+           goto err;
+       }
+       read_extent_buffer(path->nodes[0], ptr + 1,
+                  (unsigned long)(root_ref + 1), len);
+       ptr[0] = '/';
+       dirid = btrfs_root_ref_dirid(path->nodes[0], root_ref);
+       btrfs_release_path(path);
+
+       key.objectid = subvol_objectid;
+       key.type = BTRFS_ROOT_ITEM_KEY;
+       key.offset = (u64)-1;
+       fs_root = btrfs_read_fs_root_no_name(fs_info, &key);
+       if (IS_ERR(fs_root)) {
+           ret = PTR_ERR(fs_root);
+           goto err;
+       }
+
+       /*
+        * Walk up the filesystem tree by inode refs until we hit the
+        * root directory.
+        */
+       while (dirid != BTRFS_FIRST_FREE_OBJECTID) {
+           key.objectid = dirid;
+           key.type = BTRFS_INODE_REF_KEY;
+           key.offset = (u64)-1;
+
+           ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
+           if (ret < 0) {
+               goto err;
+           } else if (ret > 0) {
+               ret = btrfs_previous_item(fs_root, path, dirid,
+                             BTRFS_INODE_REF_KEY);
+               if (ret < 0) {
+                   goto err;
+               } else if (ret > 0) {
+                   ret = -ENOENT;
+                   goto err;
+               }
+           }
+
+           btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
+           dirid = key.offset;
+
+           inode_ref = btrfs_item_ptr(path->nodes[0],
+                          path->slots[0],
+                          struct btrfs_inode_ref);
+           len = btrfs_inode_ref_name_len(path->nodes[0],
+                          inode_ref);
+           ptr -= len + 1;
+           if (ptr < name) {
+               ret = -ENAMETOOLONG;
+               goto err;
+           }
+           read_extent_buffer(path->nodes[0], ptr + 1,
+                      (unsigned long)(inode_ref + 1), len);
+           ptr[0] = '/';
+           btrfs_release_path(path);
+       }
+   }
+
+   btrfs_free_path(path);
+   if (ptr == name + PATH_MAX - 1) {
+       name[0] = '/';
+       name[1] = '\0';
+   } else {
+       memmove(name, ptr, name + PATH_MAX - ptr);
+   }
+   return name;
+
+err:
+   btrfs_free_path(path);
+   kfree(name);
+   return ERR_PTR(ret);
+}
+
+static int get_default_subvol_objectid(struct btrfs_fs_info *fs_info, u64 *objectid)
+{
-   struct btrfs_fs_info *fs_info = btrfs_sb(sb);
    struct btrfs_root *root = fs_info->tree_root;
-   struct btrfs_root *new_root;
    struct btrfs_dir_item *di;
    struct btrfs_path *path;
    struct btrfs_key location;
-   struct inode *inode;
    u64 dir_id;
-   int new = 0;
-
-   /*
-    * We have a specific subvol we want to mount, just setup location and
-    * go look up the root.
-    */
-   if (subvol_objectid) {
-       location.objectid = subvol_objectid;
-       location.type = BTRFS_ROOT_ITEM_KEY;
-       location.offset = (u64)-1;
-       goto find_root;
-   }

    path = btrfs_alloc_path();
    if (!path)
-       return ERR_PTR(-ENOMEM);
+       return -ENOMEM;
    path->leave_spinning = 1;

    /*

@@ -879,58 +994,23 @@ static struct dentry *get_default_root(struct super_block *sb,
    di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
    if (IS_ERR(di)) {
        btrfs_free_path(path);
-       return ERR_CAST(di);
+       return PTR_ERR(di);
    }
    if (!di) {
        /*
         * Ok the default dir item isn't there. This is weird since
         * it's always been there, but don't freak out, just try and
-        * mount to root most subvolume.
+        * mount the top-level subvolume.
         */
        btrfs_free_path(path);
-       dir_id = BTRFS_FIRST_FREE_OBJECTID;
-       new_root = fs_info->fs_root;
-       goto setup_root;
+       *objectid = BTRFS_FS_TREE_OBJECTID;
+       return 0;
    }

    btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
    btrfs_free_path(path);

-find_root:
-   new_root = btrfs_read_fs_root_no_name(fs_info, &location);
-   if (IS_ERR(new_root))
-       return ERR_CAST(new_root);
-
-   if (!(sb->s_flags & MS_RDONLY)) {
-       int ret;
-       down_read(&fs_info->cleanup_work_sem);
-       ret = btrfs_orphan_cleanup(new_root);
-       up_read(&fs_info->cleanup_work_sem);
-       if (ret)
-           return ERR_PTR(ret);
-   }
-
-   dir_id = btrfs_root_dirid(&new_root->root_item);
-setup_root:
-   location.objectid = dir_id;
-   location.type = BTRFS_INODE_ITEM_KEY;
-   location.offset = 0;
-
-   inode = btrfs_iget(sb, &location, new_root, &new);
-   if (IS_ERR(inode))
-       return ERR_CAST(inode);
-
-   /*
-    * If we're just mounting the root most subvol put the inode and return
-    * a reference to the dentry. We will have already gotten a reference
-    * to the inode in btrfs_fill_super so we're good to go.
-    */
-   if (!new && d_inode(sb->s_root) == inode) {
-       iput(inode);
-       return dget(sb->s_root);
-   }
-
-   return d_obtain_root(inode);
+   *objectid = location.objectid;
+   return 0;
}

 static int btrfs_fill_super(struct super_block *sb,

@@ -1108,6 +1188,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
        seq_puts(seq, ",fatal_errors=panic");
    if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
        seq_printf(seq, ",commit=%d", info->commit_interval);
+   seq_printf(seq, ",subvolid=%llu",
+         BTRFS_I(d_inode(dentry))->root->root_key.objectid);
+   seq_puts(seq, ",subvol=");
+   seq_dentry(seq, dentry, " \t\n\\");
    return 0;
}

@@ -1138,107 +1222,139 @@ static inline int is_subvolume_inode(struct inode *inode)
}

 /*
- * This will strip out the subvol=%s argument for an argument string and add
- * subvolid=0 to make sure we get the actual tree root for path walking to the
- * subvol we want.
+ * This will add subvolid=0 to the argument string while removing any subvol=
+ * and subvolid= arguments to make sure we get the top-level root for path
+ * walking to the subvol we want.
 */
 static char *setup_root_args(char *args)
 {
-   unsigned len = strlen(args) + 2 + 1;
-   char *src, *dst, *buf;
+   char *buf, *dst, *sep;

-   /*
-    * We need the same args as before, but with this substitution:
-    * s!subvol=[^,]+!subvolid=0!
-    *
-    * Since the replacement string is up to 2 bytes longer than the
-    * original, allocate strlen(args) + 2 + 1 bytes.
-    */
+   if (!args)
+       return kstrdup("subvolid=0", GFP_NOFS);

-   src = strstr(args, "subvol=");
-   /* This shouldn't happen, but just in case.. */
-   if (!src)
-       return NULL;
-
-   buf = dst = kmalloc(len, GFP_NOFS);
+   /* The worst case is that we add ",subvolid=0" to the end. */
+   buf = dst = kmalloc(strlen(args) + strlen(",subvolid=0") + 1, GFP_NOFS);
    if (!buf)
        return NULL;

-   /*
-    * If the subvol= arg is not at the start of the string,
-    * copy whatever precedes it into buf.
-    */
-   if (src != args) {
-       *src++ = '\0';
-       strcpy(buf, args);
-       dst += strlen(args);
+   while (1) {
+       sep = strchrnul(args, ',');
+       if (!strstarts(args, "subvol=") &&
+           !strstarts(args, "subvolid=")) {
+           memcpy(dst, args, sep - args);
+           dst += sep - args;
+           *dst++ = ',';
+       }
+       if (*sep)
+           args = sep + 1;
+       else
+           break;
    }

    strcpy(dst, "subvolid=0");
-   dst += strlen("subvolid=0");
-
-   /*
-    * If there is a "," after the original subvol=... string,
-    * copy that suffix into our buffer. Otherwise, we're done.
-    */
-   src = strchr(src, ',');
-   if (src)
-       strcpy(dst, src);

    return buf;
}

-static struct dentry *mount_subvol(const char *subvol_name, int flags,
-                  const char *device_name, char *data)
+static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
+                  int flags, const char *device_name,
+                  char *data)
 {
    struct dentry *root;
-   struct vfsmount *mnt;
+   struct vfsmount *mnt = NULL;
    char *newargs;
+   int ret;

    newargs = setup_root_args(data);
-   if (!newargs)
-       return ERR_PTR(-ENOMEM);
-   mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name,
-                newargs);
+   if (!newargs) {
+       root = ERR_PTR(-ENOMEM);
+       goto out;
+   }

-   if (PTR_RET(mnt) == -EBUSY) {
+   mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs);
+   if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) {
        if (flags & MS_RDONLY) {
-           mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY, device_name,
-                        newargs);
+           mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY,
+                        device_name, newargs);
        } else {
-           int r;
-           mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY, device_name,
-                        newargs);
+           mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY,
+                        device_name, newargs);
            if (IS_ERR(mnt)) {
-               kfree(newargs);
-               return ERR_CAST(mnt);
+               root = ERR_CAST(mnt);
+               mnt = NULL;
+               goto out;
            }

-           r = btrfs_remount(mnt->mnt_sb, &flags, NULL);
-           if (r < 0) {
-               /* FIXME: release vfsmount mnt ??*/
-               kfree(newargs);
-               return ERR_PTR(r);
+           down_write(&mnt->mnt_sb->s_umount);
+           ret = btrfs_remount(mnt->mnt_sb, &flags, NULL);
+           up_write(&mnt->mnt_sb->s_umount);
+           if (ret < 0) {
+               root = ERR_PTR(ret);
+               goto out;
            }
        }
    }

-   kfree(newargs);
-
-   if (IS_ERR(mnt))
-       return ERR_CAST(mnt);
-
-   root = mount_subtree(mnt, subvol_name);
-
-   if (!IS_ERR(root) && !is_subvolume_inode(d_inode(root))) {
-       struct super_block *s = root->d_sb;
-       dput(root);
-       root = ERR_PTR(-EINVAL);
-       deactivate_locked_super(s);
-       printk(KERN_ERR "BTRFS: '%s' is not a valid subvolume\n",
-               subvol_name);
+   if (IS_ERR(mnt)) {
+       root = ERR_CAST(mnt);
+       mnt = NULL;
+       goto out;
    }

+   if (!subvol_name) {
+       if (!subvol_objectid) {
+           ret = get_default_subvol_objectid(btrfs_sb(mnt->mnt_sb),
+                             &subvol_objectid);
+           if (ret) {
+               root = ERR_PTR(ret);
+               goto out;
+           }
+       }
+       subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
+                               subvol_objectid);
+       if (IS_ERR(subvol_name)) {
+           root = ERR_CAST(subvol_name);
+           subvol_name = NULL;
+           goto out;
+       }
+   }
+
+   root = mount_subtree(mnt, subvol_name);
+   /* mount_subtree() drops our reference on the vfsmount. */
+   mnt = NULL;
+
+   if (!IS_ERR(root)) {
+       struct super_block *s = root->d_sb;
+       struct inode *root_inode = d_inode(root);
+       u64 root_objectid = BTRFS_I(root_inode)->root->root_key.objectid;
+
+       ret = 0;
+       if (!is_subvolume_inode(root_inode)) {
+           pr_err("BTRFS: '%s' is not a valid subvolume\n",
+                  subvol_name);
+           ret = -EINVAL;
+       }
+       if (subvol_objectid && root_objectid != subvol_objectid) {
+           /*
+            * This will also catch a race condition where a
+            * subvolume which was passed by ID is renamed and
+            * another subvolume is renamed over the old location.
+            */
+           pr_err("BTRFS: subvol '%s' does not match subvolid %llu\n",
+                  subvol_name, subvol_objectid);
+           ret = -EINVAL;
+       }
+       if (ret) {
+           dput(root);
+           root = ERR_PTR(ret);
+           deactivate_locked_super(s);
+       }
+   }
+
+out:
+   mntput(mnt);
+   kfree(newargs);
+   kfree(subvol_name);
    return root;
}

@@ -1303,7 +1419,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 {
    struct block_device *bdev = NULL;
    struct super_block *s;
-   struct dentry *root;
    struct btrfs_fs_devices *fs_devices = NULL;
    struct btrfs_fs_info *fs_info = NULL;
    struct security_mnt_opts new_sec_opts;

@@ -1323,10 +1438,10 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        return ERR_PTR(error);
    }

-   if (subvol_name) {
-       root = mount_subvol(subvol_name, flags, device_name, data);
-       kfree(subvol_name);
-       return root;
+   if (subvol_name || subvol_objectid != BTRFS_FS_TREE_OBJECTID) {
+       /* mount_subvol() will free subvol_name. */
+       return mount_subvol(subvol_name, subvol_objectid, flags,
+                   device_name, data);
    }

    security_init_mnt_opts(&new_sec_opts);

@@ -1392,23 +1507,19 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        error = btrfs_fill_super(s, fs_devices, data,
                     flags & MS_SILENT ? 1 : 0);
    }

-   root = !error ? get_default_root(s, subvol_objectid) : ERR_PTR(error);
-   if (IS_ERR(root)) {
+   if (error) {
        deactivate_locked_super(s);
-       error = PTR_ERR(root);
        goto error_sec_opts;
    }

    fs_info = btrfs_sb(s);
    error = setup_security_options(fs_info, s, &new_sec_opts);
    if (error) {
-       dput(root);
        deactivate_locked_super(s);
        goto error_sec_opts;
    }

-   return root;
+   return dget(s->s_root);

 error_close_devices:
    btrfs_close_devices(fs_devices);
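The rewritten setup_root_args() above is a token filter over a comma-separated option string: drop every subvol=/subvolid= token, keep the rest, append subvolid=0. The same loop compiles as plain userspace C once the kernel helpers strstarts() and strchrnul() (a GNU extension) are available; the example input is invented:

    #define _GNU_SOURCE /* for strchrnul() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int strstarts(const char *str, const char *prefix)
    {
        return strncmp(str, prefix, strlen(prefix)) == 0;
    }

    /* Same algorithm as the new setup_root_args(): drop subvol=/subvolid=
     * tokens, keep everything else, append subvolid=0. */
    static char *setup_root_args(const char *args)
    {
        char *buf, *dst, *sep;

        if (!args)
            return strdup("subvolid=0");

        buf = dst = malloc(strlen(args) + strlen(",subvolid=0") + 1);
        if (!buf)
            return NULL;

        while (1) {
            sep = strchrnul(args, ',');
            if (!strstarts(args, "subvol=") &&
                !strstarts(args, "subvolid=")) {
                memcpy(dst, args, sep - args);
                dst += sep - args;
                *dst++ = ',';
            }
            if (*sep)
                args = sep + 1;
            else
                break;
        }
        strcpy(dst, "subvolid=0");
        return buf;
    }

    int main(void)
    {
        char *s = setup_root_args("noatime,subvol=/home,compress=zlib");

        puts(s);    /* noatime,compress=zlib,subvolid=0 */
        free(s);
        return 0;
    }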
154
fs/btrfs/sysfs.c
154
fs/btrfs/sysfs.c
@@ -33,6 +33,7 @@
 #include "volumes.h"
 
 static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj);
+static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj);
 
 static u64 get_features(struct btrfs_fs_info *fs_info,
 			enum btrfs_feature_set set)
@@ -428,7 +429,7 @@ static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
 
 BTRFS_ATTR(clone_alignment, btrfs_clone_alignment_show);
 
-static struct attribute *btrfs_attrs[] = {
+static const struct attribute *btrfs_attrs[] = {
 	BTRFS_ATTR_PTR(label),
 	BTRFS_ATTR_PTR(nodesize),
 	BTRFS_ATTR_PTR(sectorsize),
@@ -438,21 +439,29 @@ static struct attribute *btrfs_attrs[] = {
 
 static void btrfs_release_super_kobj(struct kobject *kobj)
 {
-	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
-	complete(&fs_info->kobj_unregister);
+	struct btrfs_fs_devices *fs_devs = to_fs_devs(kobj);
+
+	memset(&fs_devs->super_kobj, 0, sizeof(struct kobject));
+	complete(&fs_devs->kobj_unregister);
 }
 
 static struct kobj_type btrfs_ktype = {
 	.sysfs_ops	= &kobj_sysfs_ops,
 	.release	= btrfs_release_super_kobj,
-	.default_attrs	= btrfs_attrs,
 };
 
+static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj)
+{
+	if (kobj->ktype != &btrfs_ktype)
+		return NULL;
+	return container_of(kobj, struct btrfs_fs_devices, super_kobj);
+}
+
 static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj)
 {
 	if (kobj->ktype != &btrfs_ktype)
 		return NULL;
-	return container_of(kobj, struct btrfs_fs_info, super_kobj);
+	return to_fs_devs(kobj)->fs_info;
 }
 
 #define NUM_FEATURE_BITS 64
@@ -493,12 +502,12 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
 		attrs[0] = &fa->kobj_attr.attr;
 		if (add) {
 			int ret;
-			ret = sysfs_merge_group(&fs_info->super_kobj,
+			ret = sysfs_merge_group(&fs_info->fs_devices->super_kobj,
 						&agroup);
 			if (ret)
 				return ret;
 		} else
-			sysfs_unmerge_group(&fs_info->super_kobj,
+			sysfs_unmerge_group(&fs_info->fs_devices->super_kobj,
 					    &agroup);
 	}
 
@@ -506,25 +515,49 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
 	return 0;
 }
 
-static void __btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info)
+static void __btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs)
 {
-	kobject_del(&fs_info->super_kobj);
-	kobject_put(&fs_info->super_kobj);
-	wait_for_completion(&fs_info->kobj_unregister);
+	if (fs_devs->device_dir_kobj) {
+		kobject_del(fs_devs->device_dir_kobj);
+		kobject_put(fs_devs->device_dir_kobj);
+		fs_devs->device_dir_kobj = NULL;
+	}
+
+	if (fs_devs->super_kobj.state_initialized) {
+		kobject_del(&fs_devs->super_kobj);
+		kobject_put(&fs_devs->super_kobj);
+		wait_for_completion(&fs_devs->kobj_unregister);
+	}
+}
+
+/* when fs_devs is NULL it will remove all fsid kobjects */
+void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs)
+{
+	struct list_head *fs_uuids = btrfs_get_fs_uuids();
+
+	if (fs_devs) {
+		__btrfs_sysfs_remove_fsid(fs_devs);
+		return;
+	}
+
+	list_for_each_entry(fs_devs, fs_uuids, list) {
+		__btrfs_sysfs_remove_fsid(fs_devs);
+	}
 }
 
 void btrfs_sysfs_remove_one(struct btrfs_fs_info *fs_info)
 {
+	btrfs_reset_fs_info_ptr(fs_info);
+
 	if (fs_info->space_info_kobj) {
 		sysfs_remove_files(fs_info->space_info_kobj, allocation_attrs);
 		kobject_del(fs_info->space_info_kobj);
 		kobject_put(fs_info->space_info_kobj);
 	}
-	kobject_del(fs_info->device_dir_kobj);
-	kobject_put(fs_info->device_dir_kobj);
 	addrm_unknown_feature_attrs(fs_info, false);
-	sysfs_remove_group(&fs_info->super_kobj, &btrfs_feature_attr_group);
-	__btrfs_sysfs_remove_one(fs_info);
+	sysfs_remove_group(&fs_info->fs_devices->super_kobj, &btrfs_feature_attr_group);
+	sysfs_remove_files(&fs_info->fs_devices->super_kobj, btrfs_attrs);
+	btrfs_kobj_rm_device(fs_info->fs_devices, NULL);
 }
 
 const char * const btrfs_feature_set_names[3] = {
@@ -602,40 +635,60 @@ static void init_feature_attrs(void)
 	}
 }
 
-int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+/* when one_device is NULL, it removes all device links */
+int btrfs_kobj_rm_device(struct btrfs_fs_devices *fs_devices,
 			 struct btrfs_device *one_device)
 {
 	struct hd_struct *disk;
 	struct kobject *disk_kobj;
 
-	if (!fs_info->device_dir_kobj)
+	if (!fs_devices->device_dir_kobj)
 		return -EINVAL;
 
 	if (one_device && one_device->bdev) {
 		disk = one_device->bdev->bd_part;
 		disk_kobj = &part_to_dev(disk)->kobj;
 
-		sysfs_remove_link(fs_info->device_dir_kobj,
+		sysfs_remove_link(fs_devices->device_dir_kobj,
 				  disk_kobj->name);
 	}
 
+	if (one_device)
+		return 0;
+
+	list_for_each_entry(one_device,
+			&fs_devices->devices, dev_list) {
+		if (!one_device->bdev)
+			continue;
+		disk = one_device->bdev->bd_part;
+		disk_kobj = &part_to_dev(disk)->kobj;
+
+		sysfs_remove_link(fs_devices->device_dir_kobj,
+				disk_kobj->name);
+	}
+
 	return 0;
 }
 
-int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
-			  struct btrfs_device *one_device)
+int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs)
+{
+	if (!fs_devs->device_dir_kobj)
+		fs_devs->device_dir_kobj = kobject_create_and_add("devices",
+						&fs_devs->super_kobj);
+
+	if (!fs_devs->device_dir_kobj)
+		return -ENOMEM;
+
+	return 0;
+}
+
+int btrfs_kobj_add_device(struct btrfs_fs_devices *fs_devices,
+			  struct btrfs_device *one_device)
 {
 	int error = 0;
-	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *dev;
 
-	if (!fs_info->device_dir_kobj)
-		fs_info->device_dir_kobj = kobject_create_and_add("devices",
-						&fs_info->super_kobj);
-
-	if (!fs_info->device_dir_kobj)
-		return -ENOMEM;
-
 	list_for_each_entry(dev, &fs_devices->devices, dev_list) {
 		struct hd_struct *disk;
 		struct kobject *disk_kobj;
@@ -649,7 +702,7 @@ int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
 		disk = dev->bdev->bd_part;
 		disk_kobj = &part_to_dev(disk)->kobj;
 
-		error = sysfs_create_link(fs_info->device_dir_kobj,
+		error = sysfs_create_link(fs_devices->device_dir_kobj,
 					  disk_kobj, disk_kobj->name);
 		if (error)
 			break;
@@ -667,34 +720,51 @@ static struct dentry *btrfs_debugfs_root_dentry;
 /* Debugging tunables and exported data */
 u64 btrfs_debugfs_test;
 
-int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info)
+/*
+ * Can be called by the device discovery thread.
+ * The parent kobject can be specified for a seed device.
+ */
+int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
+				struct kobject *parent)
 {
 	int error;
 
-	init_completion(&fs_info->kobj_unregister);
-	fs_info->super_kobj.kset = btrfs_kset;
-	error = kobject_init_and_add(&fs_info->super_kobj, &btrfs_ktype, NULL,
-				     "%pU", fs_info->fsid);
+	init_completion(&fs_devs->kobj_unregister);
+	fs_devs->super_kobj.kset = btrfs_kset;
+	error = kobject_init_and_add(&fs_devs->super_kobj,
+				&btrfs_ktype, parent, "%pU", fs_devs->fsid);
+	return error;
+}
+
+int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info)
+{
+	int error;
+	struct btrfs_fs_devices *fs_devs = fs_info->fs_devices;
+	struct kobject *super_kobj = &fs_devs->super_kobj;
+
+	btrfs_set_fs_info_ptr(fs_info);
+
+	error = btrfs_kobj_add_device(fs_devs, NULL);
+	if (error)
+		return error;
 
-	error = sysfs_create_group(&fs_info->super_kobj,
-				   &btrfs_feature_attr_group);
+	error = sysfs_create_files(super_kobj, btrfs_attrs);
 	if (error) {
-		__btrfs_sysfs_remove_one(fs_info);
+		btrfs_kobj_rm_device(fs_devs, NULL);
 		return error;
 	}
 
+	error = sysfs_create_group(super_kobj,
+				   &btrfs_feature_attr_group);
+	if (error)
+		goto failure;
+
 	error = addrm_unknown_feature_attrs(fs_info, true);
 	if (error)
 		goto failure;
 
-	error = btrfs_kobj_add_device(fs_info, NULL);
-	if (error)
-		goto failure;
-
 	fs_info->space_info_kobj = kobject_create_and_add("allocation",
-						  &fs_info->super_kobj);
+						  super_kobj);
 	if (!fs_info->space_info_kobj) {
 		error = -ENOMEM;
 		goto failure;
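
Note: with these sysfs.c changes the /sys/fs/btrfs/<fsid> kobject is owned by btrfs_fs_devices rather than btrfs_fs_info, so it can be created at device-scan time (btrfs_sysfs_add_fsid) and torn down for every registered filesystem at once (btrfs_sysfs_remove_fsid(NULL)). A small userspace sketch that walks the resulting layout; it assumes only the existing /sys/fs/btrfs hierarchy:

	#include <stdio.h>
	#include <string.h>
	#include <dirent.h>

	int main(void)
	{
		DIR *top = opendir("/sys/fs/btrfs");
		struct dirent *fs;

		if (!top)
			return 1;
		while ((fs = readdir(top)) != NULL) {
			char path[512];
			DIR *devs;
			struct dirent *d;

			/* fsid directories are UUIDs; skip ".", "..", "features" */
			if (strchr(fs->d_name, '-') == NULL)
				continue;
			printf("%s\n", fs->d_name);
			snprintf(path, sizeof(path),
				 "/sys/fs/btrfs/%s/devices", fs->d_name);
			devs = opendir(path);
			if (!devs)
				continue;
			while ((d = readdir(devs)) != NULL)
				if (d->d_name[0] != '.')
					printf("  devices/%s\n", d->d_name);
			closedir(devs);
		}
		closedir(top);
		return 0;
	}
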
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
@@ -82,8 +82,12 @@ char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
 extern const char * const btrfs_feature_set_names[3];
 extern struct kobj_type space_info_ktype;
 extern struct kobj_type btrfs_raid_ktype;
-int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+int btrfs_kobj_add_device(struct btrfs_fs_devices *fs_devices,
 			  struct btrfs_device *one_device);
-int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+int btrfs_kobj_rm_device(struct btrfs_fs_devices *fs_devices,
 			 struct btrfs_device *one_device);
+int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
+				struct kobject *parent);
+int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs);
+void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs);
 #endif /* _BTRFS_SYSFS_H_ */
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
@@ -21,6 +21,7 @@
 #include "../transaction.h"
 #include "../disk-io.h"
 #include "../qgroup.h"
+#include "../backref.h"
 
 static void init_dummy_trans(struct btrfs_trans_handle *trans)
 {
@@ -227,6 +228,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
 {
 	struct btrfs_trans_handle trans;
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct ulist *old_roots = NULL;
+	struct ulist *new_roots = NULL;
 	int ret;
 
 	init_dummy_trans(&trans);
@@ -238,10 +241,15 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
 		return ret;
 	}
 
-	ret = btrfs_qgroup_record_ref(&trans, fs_info, 5, 4096, 4096,
-				      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
+	/*
+	 * Since the test trans doesn't have the complicated delayed refs,
+	 * we can only call btrfs_qgroup_account_extent() directly to test
+	 * quota.
+	 */
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
 	if (ret) {
-		test_msg("Couldn't add space to a qgroup %d\n", ret);
+		ulist_free(old_roots);
+		test_msg("Couldn't find old roots: %d\n", ret);
 		return ret;
 	}
 
@@ -249,9 +257,18 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
 	if (ret)
 		return ret;
 
-	ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
 	if (ret) {
-		test_msg("Delayed qgroup accounting failed %d\n", ret);
+		ulist_free(old_roots);
+		ulist_free(new_roots);
+		test_msg("Couldn't find new roots: %d\n", ret);
+		return ret;
+	}
+
+	ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
+					  old_roots, new_roots);
+	if (ret) {
+		test_msg("Couldn't account space for a qgroup %d\n", ret);
 		return ret;
 	}
 
@@ -259,21 +276,32 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
 		test_msg("Qgroup counts didn't match expected values\n");
 		return -EINVAL;
 	}
+	old_roots = NULL;
+	new_roots = NULL;
+
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+	if (ret) {
+		ulist_free(old_roots);
+		test_msg("Couldn't find old roots: %d\n", ret);
+		return ret;
+	}
 
 	ret = remove_extent_item(root, 4096, 4096);
 	if (ret)
 		return -EINVAL;
 
-	ret = btrfs_qgroup_record_ref(&trans, fs_info, 5, 4096, 4096,
-				      BTRFS_QGROUP_OPER_SUB_EXCL, 0);
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
 	if (ret) {
-		test_msg("Couldn't remove space from the qgroup %d\n", ret);
-		return -EINVAL;
+		ulist_free(old_roots);
+		ulist_free(new_roots);
+		test_msg("Couldn't find new roots: %d\n", ret);
+		return ret;
 	}
 
-	ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
+	ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
+					  old_roots, new_roots);
 	if (ret) {
-		test_msg("Qgroup accounting failed %d\n", ret);
+		test_msg("Couldn't account space for a qgroup %d\n", ret);
 		return -EINVAL;
 	}
 
@@ -294,6 +322,8 @@ static int test_multiple_refs(struct btrfs_root *root)
 {
 	struct btrfs_trans_handle trans;
 	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct ulist *old_roots = NULL;
+	struct ulist *new_roots = NULL;
 	int ret;
 
 	init_dummy_trans(&trans);
@@ -307,20 +337,29 @@ static int test_multiple_refs(struct btrfs_root *root)
 		return ret;
 	}
 
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+	if (ret) {
+		ulist_free(old_roots);
+		test_msg("Couldn't find old roots: %d\n", ret);
+		return ret;
+	}
+
 	ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5);
 	if (ret)
 		return ret;
 
-	ret = btrfs_qgroup_record_ref(&trans, fs_info, 5, 4096, 4096,
-				      BTRFS_QGROUP_OPER_ADD_EXCL, 0);
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
 	if (ret) {
-		test_msg("Couldn't add space to a qgroup %d\n", ret);
+		ulist_free(old_roots);
+		ulist_free(new_roots);
+		test_msg("Couldn't find new roots: %d\n", ret);
 		return ret;
 	}
 
-	ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
+	ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
+					  old_roots, new_roots);
 	if (ret) {
-		test_msg("Delayed qgroup accounting failed %d\n", ret);
+		test_msg("Couldn't account space for a qgroup %d\n", ret);
 		return ret;
 	}
 
@@ -329,20 +368,29 @@ static int test_multiple_refs(struct btrfs_root *root)
 		return -EINVAL;
 	}
 
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+	if (ret) {
+		ulist_free(old_roots);
+		test_msg("Couldn't find old roots: %d\n", ret);
+		return ret;
+	}
+
 	ret = add_tree_ref(root, 4096, 4096, 0, 256);
 	if (ret)
 		return ret;
 
-	ret = btrfs_qgroup_record_ref(&trans, fs_info, 256, 4096, 4096,
-				      BTRFS_QGROUP_OPER_ADD_SHARED, 0);
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
 	if (ret) {
-		test_msg("Qgroup record ref failed %d\n", ret);
+		ulist_free(old_roots);
+		ulist_free(new_roots);
+		test_msg("Couldn't find new roots: %d\n", ret);
 		return ret;
 	}
 
-	ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
+	ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
+					  old_roots, new_roots);
 	if (ret) {
-		test_msg("Qgroup accounting failed %d\n", ret);
+		test_msg("Couldn't account space for a qgroup %d\n", ret);
 		return ret;
 	}
 
@@ -356,20 +404,29 @@ static int test_multiple_refs(struct btrfs_root *root)
 		return -EINVAL;
 	}
 
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots);
+	if (ret) {
+		ulist_free(old_roots);
+		test_msg("Couldn't find old roots: %d\n", ret);
+		return ret;
+	}
+
 	ret = remove_extent_ref(root, 4096, 4096, 0, 256);
 	if (ret)
 		return ret;
 
-	ret = btrfs_qgroup_record_ref(&trans, fs_info, 256, 4096, 4096,
-				      BTRFS_QGROUP_OPER_SUB_SHARED, 0);
+	ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots);
 	if (ret) {
-		test_msg("Qgroup record ref failed %d\n", ret);
+		ulist_free(old_roots);
+		ulist_free(new_roots);
+		test_msg("Couldn't find new roots: %d\n", ret);
 		return ret;
 	}
 
-	ret = btrfs_delayed_qgroup_accounting(&trans, fs_info);
+	ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096,
+					  old_roots, new_roots);
 	if (ret) {
-		test_msg("Qgroup accounting failed %d\n", ret);
+		test_msg("Couldn't account space for a qgroup %d\n", ret);
 		return ret;
 	}
 
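
Note: the reworked tests follow the new extent-oriented accounting pattern: snapshot the set of owning roots before and after a reference change, then hand both sets to the accounting function. A condensed sketch using the kernel functions above (the dummy-transaction setup and error unwinding are elided; see the tests for the full form):

	ret = btrfs_find_all_roots(&trans, fs_info, bytenr, 0, &old_roots);
	/* ... insert or remove a tree ref for the extent ... */
	ret = btrfs_find_all_roots(&trans, fs_info, bytenr, 0, &new_roots);
	ret = btrfs_qgroup_account_extent(&trans, fs_info, bytenr, num_bytes,
					  old_roots, new_roots);
	/* btrfs_qgroup_account_extent() consumes both ulists; on the error
	 * paths before it runs, the caller must ulist_free() them itself. */
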
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
@@ -225,12 +225,14 @@ loop:
 	cur_trans->dirty_bg_run = 0;
 
 	cur_trans->delayed_refs.href_root = RB_ROOT;
+	cur_trans->delayed_refs.dirty_extent_root = RB_ROOT;
 	atomic_set(&cur_trans->delayed_refs.num_entries, 0);
 	cur_trans->delayed_refs.num_heads_ready = 0;
 	cur_trans->delayed_refs.pending_csums = 0;
 	cur_trans->delayed_refs.num_heads = 0;
 	cur_trans->delayed_refs.flushing = 0;
 	cur_trans->delayed_refs.run_delayed_start = 0;
+	cur_trans->delayed_refs.qgroup_to_skip = 0;
 
 	/*
 	 * although the tree mod log is per file system and not per transaction,
@@ -509,6 +511,7 @@ again:
 	h->transaction = cur_trans;
 	h->blocks_used = 0;
 	h->bytes_reserved = 0;
+	h->chunk_bytes_reserved = 0;
 	h->root = root;
 	h->delayed_ref_updates = 0;
 	h->use_count = 1;
@@ -792,6 +795,8 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	if (!list_empty(&trans->new_bgs))
 		btrfs_create_pending_block_groups(trans, root);
 
+	btrfs_trans_release_chunk_metadata(trans);
+
 	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
 	    should_end_transaction(trans, root) &&
 	    ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
@@ -1290,6 +1295,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	if (pending->error)
 		goto no_free_objectid;
 
+	/*
+	 * Make the qgroup code skip the new snapshot's qgroupid, as it is
+	 * accounted by the later btrfs_qgroup_inherit().
+	 */
+	btrfs_set_skip_qgroup(trans, objectid);
+
 	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
 
 	if (to_reserve > 0) {
@@ -1298,7 +1309,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 							 to_reserve,
 							 BTRFS_RESERVE_NO_FLUSH);
 		if (pending->error)
-			goto no_free_objectid;
+			goto clear_skip_qgroup;
 	}
 
 	key.objectid = objectid;
@@ -1396,25 +1407,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 		btrfs_abort_transaction(trans, root, ret);
 		goto fail;
 	}
-
-	/*
-	 * We need to flush delayed refs in order to make sure all of our quota
-	 * operations have been done before we call btrfs_qgroup_inherit.
-	 */
-	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-	if (ret) {
-		btrfs_abort_transaction(trans, root, ret);
-		goto fail;
-	}
-
-	ret = btrfs_qgroup_inherit(trans, fs_info,
-				   root->root_key.objectid,
-				   objectid, pending->inherit);
-	if (ret) {
-		btrfs_abort_transaction(trans, root, ret);
-		goto fail;
-	}
 
 	/* see comments in should_cow_block() */
 	set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
 	smp_wmb();
@@ -1497,11 +1489,37 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 			goto fail;
 		}
 	}
 
+	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto fail;
+	}
+
+	/*
+	 * account qgroup counters before qgroup_inherit()
+	 */
+	ret = btrfs_qgroup_prepare_account_extents(trans, fs_info);
+	if (ret)
+		goto fail;
+	ret = btrfs_qgroup_account_extents(trans, fs_info);
+	if (ret)
+		goto fail;
+	ret = btrfs_qgroup_inherit(trans, fs_info,
+				   root->root_key.objectid,
+				   objectid, pending->inherit);
+	if (ret) {
+		btrfs_abort_transaction(trans, root, ret);
+		goto fail;
+	}
+
 fail:
 	pending->error = ret;
 dir_item_existed:
 	trans->block_rsv = rsv;
 	trans->bytes_reserved = 0;
+clear_skip_qgroup:
+	btrfs_clear_skip_qgroup(trans);
 no_free_objectid:
 	kfree(new_root_item);
 root_item_alloc_fail:
@@ -1963,6 +1981,13 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 		goto scrub_continue;
 	}
 
+	/* Record old roots for later qgroup accounting */
+	ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
+	if (ret) {
+		mutex_unlock(&root->fs_info->reloc_mutex);
+		goto scrub_continue;
+	}
+
 	/*
 	 * make sure none of the code above managed to slip in a
 	 * delayed item
@@ -2004,6 +2029,17 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	 */
 	btrfs_free_log_root_tree(trans, root->fs_info);
 
+	/*
+	 * Since fs roots are all committed, we can get a quite accurate
+	 * new_roots. So let's do quota accounting.
+	 */
+	ret = btrfs_qgroup_account_extents(trans, root->fs_info);
+	if (ret < 0) {
+		mutex_unlock(&root->fs_info->tree_log_mutex);
+		mutex_unlock(&root->fs_info->reloc_mutex);
+		goto scrub_continue;
+	}
+
 	ret = commit_cowonly_roots(trans, root);
 	if (ret) {
 		mutex_unlock(&root->fs_info->tree_log_mutex);
@@ -2054,6 +2090,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 	clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
 	clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);
 
+	btrfs_trans_release_chunk_metadata(trans);
+
 	spin_lock(&root->fs_info->trans_lock);
 	cur_trans->state = TRANS_STATE_UNBLOCKED;
 	root->fs_info->running_transaction = NULL;
@@ -2123,6 +2161,7 @@ scrub_continue:
 	btrfs_scrub_continue(root);
cleanup_transaction:
 	btrfs_trans_release_metadata(trans, root);
+	btrfs_trans_release_chunk_metadata(trans);
 	trans->block_rsv = NULL;
 	if (trans->qgroup_reserved) {
 		btrfs_qgroup_free(root, trans->qgroup_reserved);
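
Note: at commit time the new qgroup accounting is deliberately split in two. A sketch of the ordering the hunks above introduce (locking and error unwinding elided):

	/* old_roots are recorded while the fs roots are still uncommitted */
	ret = btrfs_qgroup_prepare_account_extents(trans, root->fs_info);
	/* ... commit fs roots, free the log root tree ... */
	/* new_roots are only accurate once the fs roots are committed */
	ret = btrfs_qgroup_account_extents(trans, root->fs_info);
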
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
@@ -102,6 +102,7 @@ struct btrfs_transaction {
 struct btrfs_trans_handle {
 	u64 transid;
 	u64 bytes_reserved;
+	u64 chunk_bytes_reserved;
 	u64 qgroup_reserved;
 	unsigned long use_count;
 	unsigned long blocks_reserved;
@@ -153,6 +154,29 @@ static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
 	spin_unlock(&BTRFS_I(inode)->lock);
 }
 
+/*
+ * Make the qgroup code skip the given qgroupid, so that the old/new_roots
+ * recorded for qgroup accounting won't contain it.
+ */
+static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
+					 u64 qgroupid)
+{
+	struct btrfs_delayed_ref_root *delayed_refs;
+
+	delayed_refs = &trans->transaction->delayed_refs;
+	WARN_ON(delayed_refs->qgroup_to_skip);
+	delayed_refs->qgroup_to_skip = qgroupid;
+}
+
+static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
+{
+	struct btrfs_delayed_ref_root *delayed_refs;
+
+	delayed_refs = &trans->transaction->delayed_refs;
+	WARN_ON(!delayed_refs->qgroup_to_skip);
+	delayed_refs->qgroup_to_skip = 0;
+}
+
 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
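
Note: the two inline helpers are meant to be used as a bracketed pair, as create_pending_snapshot() does above; the WARN_ON() calls catch unbalanced nesting, since only one qgroupid can be skipped per transaction. The usage pattern, condensed:

	btrfs_set_skip_qgroup(trans, objectid);	/* new snapshot's qgroupid */
	/* ... snapshot work; recorded old/new_roots exclude objectid ... */
	btrfs_clear_skip_qgroup(trans);
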
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
@@ -52,9 +52,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
 		goto out;
 
-	if (btrfs_test_opt(root, SSD))
-		goto out;
-
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
@@ -3881,12 +3881,6 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
 			     &ordered->flags))
 			continue;
 
-		if (ordered->csum_bytes_left) {
-			btrfs_start_ordered_extent(inode, ordered, 0);
-			wait_event(ordered->wait,
-				   ordered->csum_bytes_left == 0);
-		}
-
 		list_for_each_entry(sum, &ordered->list, list) {
 			ret = btrfs_csum_file_blocks(trans, log, sum);
 			if (ret)
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
@@ -132,6 +132,15 @@ static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
 	return NULL;
 }
 
+static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
+{
+	rb_erase(&node->rb_node, &ulist->root);
+	list_del(&node->list);
+	kfree(node);
+	BUG_ON(ulist->nnodes == 0);
+	ulist->nnodes--;
+}
+
 static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
 {
 	struct rb_node **p = &ulist->root.rb_node;
@@ -197,9 +206,6 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 
 	node->val = val;
 	node->aux = aux;
-#ifdef CONFIG_BTRFS_DEBUG
-	node->seqnum = ulist->nnodes;
-#endif
 
 	ret = ulist_rbtree_insert(ulist, node);
 	ASSERT(!ret);
@@ -209,6 +215,33 @@ int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 	return 1;
 }
 
+/*
+ * ulist_del - delete one node from ulist
+ * @ulist:	ulist to remove node from
+ * @val:	value to delete
+ * @aux:	aux to delete
+ *
+ * The deletion will only be done when *BOTH* val and aux match.
+ * Return 0 for successful delete.
+ * Return > 0 for not found.
+ */
+int ulist_del(struct ulist *ulist, u64 val, u64 aux)
+{
+	struct ulist_node *node;
+
+	node = ulist_rbtree_search(ulist, val);
+	/* Not found */
+	if (!node)
+		return 1;
+
+	if (node->aux != aux)
+		return 1;
+
+	/* Found and delete */
+	ulist_rbtree_erase(ulist, node);
+	return 0;
+}
+
 /**
  * ulist_next - iterate ulist
  * @ulist:	ulist to iterate
@@ -237,15 +270,7 @@ struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
 		uiter->cur_list = uiter->cur_list->next;
 	} else {
 		uiter->cur_list = ulist->nodes.next;
-#ifdef CONFIG_BTRFS_DEBUG
-		uiter->i = 0;
-#endif
 	}
 	node = list_entry(uiter->cur_list, struct ulist_node, list);
-#ifdef CONFIG_BTRFS_DEBUG
-	ASSERT(node->seqnum == uiter->i);
-	ASSERT(uiter->i >= 0 && uiter->i < ulist->nnodes);
-	uiter->i++;
-#endif
 	return node;
 }
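
Note: ulist_del() only removes a node when both val and aux match, returning 0 on success and 1 when nothing matched. A self-contained userspace mock of that contract (array storage stands in for the kernel's rbtree plus list; all names here are illustrative):

	#include <stdio.h>

	struct node { unsigned long long val, aux; int used; };
	static struct node tab[8];

	static void mock_add(unsigned long long val, unsigned long long aux)
	{
		for (int i = 0; i < 8; i++)
			if (!tab[i].used) {
				tab[i].val = val;
				tab[i].aux = aux;
				tab[i].used = 1;
				return;
			}
	}

	static int mock_del(unsigned long long val, unsigned long long aux)
	{
		for (int i = 0; i < 8; i++)
			if (tab[i].used && tab[i].val == val) {
				if (tab[i].aux != aux)
					return 1;	/* aux mismatch: not found */
				tab[i].used = 0;
				return 0;		/* found and deleted */
			}
		return 1;				/* val not found */
	}

	int main(void)
	{
		mock_add(4096, 5);
		printf("%d\n", mock_del(4096, 7));	/* 1: aux differs */
		printf("%d\n", mock_del(4096, 5));	/* 0: deleted */
		printf("%d\n", mock_del(4096, 5));	/* 1: already gone */
		return 0;
	}
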
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
@@ -57,6 +57,7 @@ void ulist_free(struct ulist *ulist);
 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
 int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
 		    u64 *old_aux, gfp_t gfp_mask);
+int ulist_del(struct ulist *ulist, u64 val, u64 aux);
 
 /* just like ulist_add_merge() but take a pointer for the aux data */
 static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
@@ -52,6 +52,10 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
 
 DEFINE_MUTEX(uuid_mutex);
 static LIST_HEAD(fs_uuids);
+struct list_head *btrfs_get_fs_uuids(void)
+{
+	return &fs_uuids;
+}
 
 static struct btrfs_fs_devices *__alloc_fs_devices(void)
 {
@@ -441,6 +445,61 @@ static void pending_bios_fn(struct btrfs_work *work)
 	run_scheduled_bios(device);
 }
 
+
+void btrfs_free_stale_device(struct btrfs_device *cur_dev)
+{
+	struct btrfs_fs_devices *fs_devs;
+	struct btrfs_device *dev;
+
+	if (!cur_dev->name)
+		return;
+
+	list_for_each_entry(fs_devs, &fs_uuids, list) {
+		int del = 1;
+
+		if (fs_devs->opened)
+			continue;
+		if (fs_devs->seeding)
+			continue;
+
+		list_for_each_entry(dev, &fs_devs->devices, dev_list) {
+
+			if (dev == cur_dev)
+				continue;
+			if (!dev->name)
+				continue;
+
+			/*
+			 * Todo: This won't be enough. What if the same device
+			 * comes back (with a new uuid) via its mapper path?
+			 * But for now this does help, as an admin will mostly
+			 * use either the mapper or the non-mapper path
+			 * throughout.
+			 */
+			rcu_read_lock();
+			del = strcmp(rcu_str_deref(dev->name),
+					rcu_str_deref(cur_dev->name));
+			rcu_read_unlock();
+			if (!del)
+				break;
+		}
+
+		if (!del) {
+			/* delete the stale device */
+			if (fs_devs->num_devices == 1) {
+				btrfs_sysfs_remove_fsid(fs_devs);
+				list_del(&fs_devs->list);
+				free_fs_devices(fs_devs);
+			} else {
+				fs_devs->num_devices--;
+				list_del(&dev->dev_list);
+				rcu_string_free(dev->name);
+				kfree(dev);
+			}
+			break;
+		}
+	}
+}
+
 /*
  * Add new device to list of registered devices
  *
@@ -556,6 +615,12 @@ static noinline int device_list_add(const char *path,
 	if (!fs_devices->opened)
 		device->generation = found_transid;
 
+	/*
+	 * if there is new btrfs on an already registered device,
+	 * then remove the stale device entry.
+	 */
+	btrfs_free_stale_device(device);
+
 	*fs_devices_ret = fs_devices;
 
 	return ret;
@@ -693,13 +758,13 @@ static void free_device(struct rcu_head *head)
 
 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 {
-	struct btrfs_device *device;
+	struct btrfs_device *device, *tmp;
 
 	if (--fs_devices->opened > 0)
 		return 0;
 
 	mutex_lock(&fs_devices->device_list_mutex);
-	list_for_each_entry(device, &fs_devices->devices, dev_list) {
+	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
 		struct btrfs_device *new_device;
 		struct rcu_string *name;
 
@@ -1067,15 +1132,31 @@ again:
 
 		map = (struct map_lookup *)em->bdev;
 		for (i = 0; i < map->num_stripes; i++) {
+			u64 end;
+
 			if (map->stripes[i].dev != device)
 				continue;
 			if (map->stripes[i].physical >= physical_start + len ||
 			    map->stripes[i].physical + em->orig_block_len <=
 			    physical_start)
 				continue;
-			*start = map->stripes[i].physical +
-				em->orig_block_len;
-			ret = 1;
+			/*
+			 * Make sure that while processing the pinned list we do
+			 * not override our *start with a lower value, because
+			 * we can have pinned chunks that fall within this
+			 * device hole and that have lower physical addresses
+			 * than the pending chunks we processed before. If we
+			 * do not take this special care we can end up getting
+			 * 2 pending chunks that start at the same physical
+			 * device offsets because the end offset of a pinned
+			 * chunk can be equal to the start offset of some
+			 * pending chunk.
+			 */
+			end = map->stripes[i].physical + em->orig_block_len;
+			if (end > *start) {
+				*start = end;
+				ret = 1;
+			}
 		}
 	}
 	if (search_list == &trans->transaction->pending_chunks) {
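
Note: the "end > *start" guard matters because pinned and pending chunks are processed as separate lists, and a pinned chunk may end exactly where an already-processed pending chunk starts. A toy illustration with made-up offsets:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0;
		/* extent end offsets in the order contains_pending_extent()
		 * might visit them: a pending chunk first, then a pinned
		 * chunk with a lower end offset */
		unsigned long long ends[] = { 12288, 4096 };

		for (int i = 0; i < 2; i++)
			if (ends[i] > start)	/* never move *start backwards */
				start = ends[i];
		printf("next search offset: %llu\n", start); /* 12288, not 4096 */
		return 0;
	}
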
@@ -1706,7 +1787,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 	if (device->bdev) {
 		device->fs_devices->open_devices--;
 		/* remove sysfs entry */
-		btrfs_kobj_rm_device(root->fs_info, device);
+		btrfs_kobj_rm_device(root->fs_info->fs_devices, device);
 	}
 
 	call_rcu(&device->rcu, free_device);
@@ -1875,6 +1956,9 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
 	mutex_lock(&uuid_mutex);
 	WARN_ON(!tgtdev);
 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
+
+	btrfs_kobj_rm_device(fs_info->fs_devices, tgtdev);
+
 	if (tgtdev->bdev) {
 		btrfs_scratch_superblock(tgtdev);
 		fs_info->fs_devices->open_devices--;
@@ -2211,7 +2295,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 				    tmp + 1);
 
 	/* add sysfs device entry */
-	btrfs_kobj_add_device(root->fs_info, device);
+	btrfs_kobj_add_device(root->fs_info->fs_devices, device);
 
 	/*
 	 * we've got more storage, clear any full flags on the space
@@ -2252,8 +2336,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 		 */
 		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
 						root->fs_info->fsid);
-		if (kobject_rename(&root->fs_info->super_kobj, fsid_buf))
-			goto error_trans;
+		if (kobject_rename(&root->fs_info->fs_devices->super_kobj,
+								fsid_buf))
+			pr_warn("BTRFS: sysfs: failed to create fsid for sprout\n");
 	}
 
 	root->fs_info->num_tolerated_disk_barrier_failures =
@@ -2289,7 +2374,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 error_trans:
 	btrfs_end_transaction(trans, root);
 	rcu_string_free(device->name);
-	btrfs_kobj_rm_device(root->fs_info, device);
+	btrfs_kobj_rm_device(root->fs_info->fs_devices, device);
 	kfree(device);
 error:
 	blkdev_put(bdev, FMODE_EXCL);
@@ -2609,6 +2694,9 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
 		return -EINVAL;
 	}
 	map = (struct map_lookup *)em->bdev;
+	lock_chunks(root->fs_info->chunk_root);
+	check_system_chunk(trans, extent_root, map->type);
+	unlock_chunks(root->fs_info->chunk_root);
 
 	for (i = 0; i < map->num_stripes; i++) {
 		struct btrfs_device *device = map->stripes[i].dev;
@@ -3908,9 +3996,9 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
 	uuid_root = btrfs_create_tree(trans, fs_info,
 				      BTRFS_UUID_TREE_OBJECTID);
 	if (IS_ERR(uuid_root)) {
-		btrfs_abort_transaction(trans, tree_root,
-					PTR_ERR(uuid_root));
-		return PTR_ERR(uuid_root);
+		ret = PTR_ERR(uuid_root);
+		btrfs_abort_transaction(trans, tree_root, ret);
+		return ret;
 	}
 
 	fs_info->uuid_root = uuid_root;
@@ -3965,6 +4053,7 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 	int slot;
 	int failed = 0;
 	bool retried = false;
+	bool checked_pending_chunks = false;
 	struct extent_buffer *l;
 	struct btrfs_key key;
 	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
@@ -4045,15 +4134,6 @@ again:
 		goto again;
 	} else if (failed && retried) {
 		ret = -ENOSPC;
-		lock_chunks(root);
-
-		btrfs_device_set_total_bytes(device, old_size);
-		if (device->writeable)
-			device->fs_devices->total_rw_bytes += diff;
-		spin_lock(&root->fs_info->free_chunk_lock);
-		root->fs_info->free_chunk_space += diff;
-		spin_unlock(&root->fs_info->free_chunk_lock);
-		unlock_chunks(root);
 		goto done;
 	}
 
@@ -4065,6 +4145,35 @@ again:
 	}
 
 	lock_chunks(root);
+
+	/*
+	 * We checked in the above loop all device extents that were already in
+	 * the device tree. However before we have updated the device's
+	 * total_bytes to the new size, we might have had chunk allocations that
+	 * have not completed yet (new block groups attached to transaction
+	 * handles), and therefore their device extents were not yet in the
+	 * device tree and we missed them in the loop above. So if we have any
+	 * pending chunk using a device extent that overlaps the device range
+	 * that we cannot use anymore, commit the current transaction and
+	 * repeat the search on the device tree - this way we guarantee we will
+	 * not have chunks using device extents that end beyond 'new_size'.
+	 */
+	if (!checked_pending_chunks) {
+		u64 start = new_size;
+		u64 len = old_size - new_size;
+
+		if (contains_pending_extent(trans, device, &start, len)) {
+			unlock_chunks(root);
+			checked_pending_chunks = true;
+			failed = 0;
+			retried = false;
+			ret = btrfs_commit_transaction(trans, root);
+			if (ret)
+				goto done;
+			goto again;
+		}
+	}
+
 	btrfs_device_set_disk_total_bytes(device, new_size);
 	if (list_empty(&device->resized_list))
 		list_add_tail(&device->resized_list,
@@ -4079,6 +4188,16 @@ again:
 	btrfs_end_transaction(trans, root);
done:
 	btrfs_free_path(path);
+	if (ret) {
+		lock_chunks(root);
+		btrfs_device_set_total_bytes(device, old_size);
+		if (device->writeable)
+			device->fs_devices->total_rw_bytes += diff;
+		spin_lock(&root->fs_info->free_chunk_lock);
+		root->fs_info->free_chunk_space += diff;
+		spin_unlock(&root->fs_info->free_chunk_lock);
+		unlock_chunks(root);
+	}
 	return ret;
 }
 
@@ -6072,6 +6191,8 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 				free_extent_map(em);
 				return -EIO;
 			}
+			btrfs_warn(root->fs_info, "devid %llu uuid %pU is missing",
+						devid, uuid);
 		}
 		map->stripes[i].dev->in_fs_metadata = 1;
 	}
@@ -6191,10 +6312,11 @@ static int read_one_dev(struct btrfs_root *root,
 		if (!btrfs_test_opt(root, DEGRADED))
 			return -EIO;
 
-		btrfs_warn(root->fs_info, "devid %llu missing", devid);
 		device = add_missing_dev(root, fs_devices, devid, dev_uuid);
 		if (!device)
 			return -ENOMEM;
+		btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
+					devid, dev_uuid);
 	} else {
 		if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
 			return -EIO;
@@ -6722,3 +6844,21 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
 	}
 	unlock_chunks(root);
 }
+
+void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+
+	while (fs_devices) {
+		fs_devices->fs_info = fs_info;
+		fs_devices = fs_devices->seed;
+	}
+}
+
+void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+
+	while (fs_devices) {
+		fs_devices->fs_info = NULL;
+		fs_devices = fs_devices->seed;
+	}
+}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
@@ -253,6 +253,12 @@ struct btrfs_fs_devices {
 	 * nonrot flag set
 	 */
 	int rotating;
+
+	struct btrfs_fs_info *fs_info;
+	/* sysfs kobjects */
+	struct kobject super_kobj;
+	struct kobject *device_dir_kobj;
+	struct completion kobj_unregister;
 };
 
 #define BTRFS_BIO_INLINE_CSUM_SIZE 64
@@ -535,5 +541,8 @@ static inline void unlock_chunks(struct btrfs_root *root)
 	mutex_unlock(&root->fs_info->chunk_mutex);
 }
 
+struct list_head *btrfs_get_fs_uuids(void);
+void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
+void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);
 
 #endif
diff --git a/fs/seq_file.c b/fs/seq_file.c
@@ -538,6 +538,7 @@ int seq_dentry(struct seq_file *m, struct dentry *dentry, const char *esc)
 
 	return res;
 }
+EXPORT_SYMBOL(seq_dentry);
 
 static void *single_start(struct seq_file *p, loff_t *pos)
 {
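
Note: exporting seq_dentry() lets a modular filesystem print an escaped dentry path from its seq_file callbacks; btrfs uses it to show the mounted subvolume. A hedged sketch of such a callback (the function name is illustrative):

	static int example_show_path(struct seq_file *m, struct dentry *dentry)
	{
		/* escape whitespace and backslashes, as /proc/mounts expects */
		return seq_dentry(m, dentry, " \t\n\\");
	}
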
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
@@ -1117,61 +1117,6 @@ DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy,
 	TP_ARGS(wq)
 );
 
-#define show_oper_type(type)						\
-	__print_symbolic(type,						\
-		{ BTRFS_QGROUP_OPER_ADD_EXCL,	"OPER_ADD_EXCL" },	\
-		{ BTRFS_QGROUP_OPER_ADD_SHARED,	"OPER_ADD_SHARED" },	\
-		{ BTRFS_QGROUP_OPER_SUB_EXCL,	"OPER_SUB_EXCL" },	\
-		{ BTRFS_QGROUP_OPER_SUB_SHARED,	"OPER_SUB_SHARED" })
-
-DECLARE_EVENT_CLASS(btrfs_qgroup_oper,
-
-	TP_PROTO(struct btrfs_qgroup_operation *oper),
-
-	TP_ARGS(oper),
-
-	TP_STRUCT__entry(
-		__field(	u64,  ref_root		)
-		__field(	u64,  bytenr		)
-		__field(	u64,  num_bytes		)
-		__field(	u64,  seq		)
-		__field(	int,  type		)
-		__field(	u64,  elem_seq		)
-	),
-
-	TP_fast_assign(
-		__entry->ref_root	= oper->ref_root;
-		__entry->bytenr		= oper->bytenr,
-		__entry->num_bytes	= oper->num_bytes;
-		__entry->seq		= oper->seq;
-		__entry->type		= oper->type;
-		__entry->elem_seq	= oper->elem.seq;
-	),
-
-	TP_printk("ref_root = %llu, bytenr = %llu, num_bytes = %llu, "
-		  "seq = %llu, elem.seq = %llu, type = %s",
-		  (unsigned long long)__entry->ref_root,
-		  (unsigned long long)__entry->bytenr,
-		  (unsigned long long)__entry->num_bytes,
-		  (unsigned long long)__entry->seq,
-		  (unsigned long long)__entry->elem_seq,
-		  show_oper_type(__entry->type))
-);
-
-DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_account,
-
-	TP_PROTO(struct btrfs_qgroup_operation *oper),
-
-	TP_ARGS(oper)
-);
-
-DEFINE_EVENT(btrfs_qgroup_oper, btrfs_qgroup_record_ref,
-
-	TP_PROTO(struct btrfs_qgroup_operation *oper),
-
-	TP_ARGS(oper)
-);
-
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
diff --git a/lib/kobject.c b/lib/kobject.c
@@ -545,6 +545,7 @@ out:
 	kfree(devpath);
 	return error;
 }
+EXPORT_SYMBOL_GPL(kobject_move);
 
 /**
 * kobject_del - unlink kobject from hierarchy.
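
Note: exporting kobject_move() lets a module re-parent a live kobject instead of deleting and re-creating it, which the btrfs sysfs code needs when a device changes owner (for example, a replace target). A minimal sketch (the wrapper name is illustrative; both kobjects are assumed initialized and added):

	static int reparent_example(struct kobject *kobj,
				    struct kobject *new_parent)
	{
		return kobject_move(kobj, new_parent);
	}
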