Merge branch 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs updates from Chris Mason:
 "Jeff Mahoney and Dave Sterba have a really nice set of cleanups in
  here, and Christoph pitched in corrections/improvements to make btrfs
  use proper helpers for bio walking instead of doing it by hand.

  There are some key fixes as well, including some long standing bugs
  that took forever to track down in btrfs_drop_extents and during
  balance"

* 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (77 commits)
  btrfs: limit async_work allocation and worker func duration
  Revert "Btrfs: adjust len of writes if following a preallocated extent"
  Btrfs: don't WARN() in btrfs_transaction_abort() for IO errors
  btrfs: opencode chunk locking, remove helpers
  btrfs: remove root parameter from transaction commit/end routines
  btrfs: split btrfs_wait_marked_extents into normal and tree log functions
  btrfs: take an fs_info directly when the root is not used otherwise
  btrfs: simplify btrfs_wait_cache_io prototype
  btrfs: convert extent-tree tracepoints to use fs_info
  btrfs: root->fs_info cleanup, access fs_info->delayed_root directly
  btrfs: root->fs_info cleanup, add fs_info convenience variables
  btrfs: root->fs_info cleanup, update_block_group{,flags}
  btrfs: root->fs_info cleanup, lock/unlock_chunks
  btrfs: root->fs_info cleanup, btrfs_calc_{trans,trunc}_metadata_size
  btrfs: pull node/sector/stripe sizes out of root and into fs_info
  btrfs: root->fs_info cleanup, io_ctl_init
  btrfs: root->fs_info cleanup, use fs_info->dev_root everywhere
  btrfs: struct reada_control.root -> reada_control.fs_info
  btrfs: struct btrfsic_state->root should be an fs_info
  btrfs: alloc_reserved_file_extent trace point should use extent_root
  ...
Linus Torvalds 2016-12-16 10:53:01 -08:00
Parents 759b2656b2 2939e1a86f
Commit 087a76d390
64 changed files with 4717 additions and 4632 deletions

View file

@@ -86,6 +86,20 @@ btrfs_work_owner(struct btrfs_work *work)
return work->wq->fs_info;
}
bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
{
/*
* We could compare wq->normal->pending with num_online_cpus()
* to support "thresh == NO_THRESHOLD" case, but it requires
* moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
* postpone it until someone needs the support of that case.
*/
if (wq->normal->thresh == NO_THRESHOLD)
return false;
return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
}
BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
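The congestion helper above treats a queue as busy once pending work exceeds twice its threshold, and deliberately reports "not congested" for NO_THRESHOLD queues. A hedged sketch of a caller (the specific queue chosen here is illustrative):

	/* illustrative: skip queuing more background work while busy */
	if (btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return;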

View file

@@ -84,4 +84,5 @@ void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
struct btrfs_fs_info *btrfs_work_owner(struct btrfs_work *work);
struct btrfs_fs_info *btrfs_workqueue_owner(struct __btrfs_workqueue *wq);
bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq);
#endif

View file

@@ -788,8 +788,7 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
if (ref->key_for_search.type)
continue;
BUG_ON(!ref->wanted_disk_byte);
eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
0);
eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0);
if (IS_ERR(eb)) {
return PTR_ERR(eb);
} else if (!extent_buffer_uptodate(eb)) {
@@ -1405,8 +1404,7 @@ again:
ref->level == 0) {
struct extent_buffer *eb;
eb = read_tree_block(fs_info->extent_root,
ref->parent, 0);
eb = read_tree_block(fs_info, ref->parent, 0);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
@@ -1829,7 +1827,7 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
}
btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
if (found_key->type == BTRFS_METADATA_ITEM_KEY)
size = fs_info->extent_root->nodesize;
size = fs_info->nodesize;
else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
size = found_key->offset;
@@ -2058,7 +2056,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
out:
if (!search_commit_root) {
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
btrfs_end_transaction(trans, fs_info->extent_root);
btrfs_end_transaction(trans);
} else {
up_read(&fs_info->commit_root_sem);
}
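Note that btrfs_end_transaction() above has dropped its root argument; the handle already carries its fs_info. A sketch of the resulting call shape (illustrative, not a hunk from this commit):

	trans = btrfs_join_transaction(fs_info->extent_root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	/* ... walk extents under the transaction ... */
	btrfs_end_transaction(trans);	/* root now comes from trans->fs_info */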

View file

@@ -254,7 +254,7 @@ struct btrfsic_state {
struct list_head all_blocks_list;
struct btrfsic_block_hashtable block_hashtable;
struct btrfsic_block_link_hashtable block_link_hashtable;
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
u64 max_superblock_generation;
struct btrfsic_block *latest_superblock;
u32 metablock_size;
@@ -646,11 +646,12 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
static int btrfsic_process_superblock(struct btrfsic_state *state,
struct btrfs_fs_devices *fs_devices)
{
int ret = 0;
struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfs_super_block *selected_super;
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device;
struct btrfsic_dev_state *selected_dev_state = NULL;
int ret = 0;
int pass;
BUG_ON(NULL == state);
@@ -716,9 +717,8 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
break;
}
num_copies =
btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
num_copies = btrfs_num_copies(fs_info, next_bytenr,
state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
@@ -783,6 +783,7 @@ static int btrfsic_process_superblock_dev_mirror(
struct btrfsic_dev_state **selected_dev_state,
struct btrfs_super_block *selected_super)
{
struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfs_super_block *super_tmp;
u64 dev_bytenr;
struct buffer_head *bh;
@@ -832,7 +833,7 @@ static int btrfsic_process_superblock_dev_mirror(
superblock_tmp->never_written = 0;
superblock_tmp->mirror_num = 1 + superblock_mirror_num;
if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
btrfs_info_in_rcu(device->dev_root->fs_info,
btrfs_info_in_rcu(fs_info,
"new initial S-block (bdev %p, %s) @%llu (%s/%llu/%d)",
superblock_bdev,
rcu_str_deref(device->name), dev_bytenr,
@@ -887,9 +888,8 @@ static int btrfsic_process_superblock_dev_mirror(
break;
}
num_copies =
btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
num_copies = btrfs_num_copies(fs_info, next_bytenr,
state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
@@ -1254,6 +1254,7 @@ static int btrfsic_create_link_to_next_block(
struct btrfs_disk_key *disk_key,
u64 parent_generation)
{
struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfsic_block *next_block = NULL;
int ret;
struct btrfsic_block_link *l;
@@ -1262,9 +1263,8 @@ static int btrfsic_create_link_to_next_block(
*next_blockp = NULL;
if (0 == *num_copiesp) {
*num_copiesp =
btrfs_num_copies(state->root->fs_info,
next_bytenr, state->metablock_size);
*num_copiesp = btrfs_num_copies(fs_info, next_bytenr,
state->metablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, *num_copiesp);
@@ -1390,13 +1390,14 @@ static int btrfsic_handle_extent_data(
struct btrfsic_block_data_ctx *block_ctx,
u32 item_offset, int force_iodone_flag)
{
int ret;
struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfs_file_extent_item file_extent_item;
u64 file_extent_item_offset;
u64 next_bytenr;
u64 num_bytes;
u64 generation;
struct btrfsic_block_link *l;
int ret;
file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
item_offset;
@@ -1456,9 +1457,8 @@ static int btrfsic_handle_extent_data(
else
chunk_len = num_bytes;
num_copies =
btrfs_num_copies(state->root->fs_info,
next_bytenr, state->datablock_size);
num_copies = btrfs_num_copies(fs_info, next_bytenr,
state->datablock_size);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
@@ -1533,13 +1533,14 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
struct btrfsic_block_data_ctx *block_ctx_out,
int mirror_num)
{
struct btrfs_fs_info *fs_info = state->fs_info;
int ret;
u64 length;
struct btrfs_bio *multi = NULL;
struct btrfs_device *device;
length = len;
ret = btrfs_map_block(state->root->fs_info, READ,
ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
bytenr, &length, &multi, mirror_num);
if (ret) {
@@ -1731,6 +1732,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
static int btrfsic_test_for_metadata(struct btrfsic_state *state,
char **datav, unsigned int num_pages)
{
struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfs_header *h;
u8 csum[BTRFS_CSUM_SIZE];
u32 crc = ~(u32)0;
@@ -1741,7 +1743,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
num_pages = state->metablock_size >> PAGE_SHIFT;
h = (struct btrfs_header *)datav[0];
if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
return 1;
for (i = 0; i < num_pages; i++) {
@@ -2202,6 +2204,7 @@ static int btrfsic_process_written_superblock(
struct btrfsic_block *const superblock,
struct btrfs_super_block *const super_hdr)
{
struct btrfs_fs_info *fs_info = state->fs_info;
int pass;
superblock->generation = btrfs_super_generation(super_hdr);
@@ -2275,9 +2278,8 @@ static int btrfsic_process_written_superblock(
break;
}
num_copies =
btrfs_num_copies(state->root->fs_info,
next_bytenr, BTRFS_SUPER_INFO_SIZE);
num_copies = btrfs_num_copies(fs_info, next_bytenr,
BTRFS_SUPER_INFO_SIZE);
if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
pr_info("num_copies(log_bytenr=%llu) = %d\n",
next_bytenr, num_copies);
@@ -2699,14 +2701,14 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
struct btrfsic_dev_state *dev_state,
u64 dev_bytenr)
{
struct btrfs_fs_info *fs_info = state->fs_info;
struct btrfsic_block_data_ctx block_ctx;
int num_copies;
int mirror_num;
int ret;
struct btrfsic_block_data_ctx block_ctx;
int match = 0;
int ret;
num_copies = btrfs_num_copies(state->root->fs_info,
bytenr, state->metablock_size);
num_copies = btrfs_num_copies(fs_info, bytenr, state->metablock_size);
for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
ret = btrfsic_map_block(state, bytenr, state->metablock_size,
@@ -2819,10 +2821,11 @@ static void __btrfsic_submit_bio(struct bio *bio)
* btrfsic_mount(), this might return NULL */
dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
if (NULL != dev_state &&
(bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) {
(bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
unsigned int i;
u64 dev_bytenr;
u64 cur_bytenr;
struct bio_vec *bvec;
int bio_is_patched;
char **mapped_datav;
@@ -2840,32 +2843,23 @@ static void __btrfsic_submit_bio(struct bio *bio)
if (!mapped_datav)
goto leave;
cur_bytenr = dev_bytenr;
for (i = 0; i < bio->bi_vcnt; i++) {
BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
if (!mapped_datav[i]) {
while (i > 0) {
i--;
kunmap(bio->bi_io_vec[i].bv_page);
}
kfree(mapped_datav);
goto leave;
}
bio_for_each_segment_all(bvec, bio, i) {
BUG_ON(bvec->bv_len != PAGE_SIZE);
mapped_datav[i] = kmap(bvec->bv_page);
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
i, cur_bytenr, bio->bi_io_vec[i].bv_len,
bio->bi_io_vec[i].bv_offset);
cur_bytenr += bio->bi_io_vec[i].bv_len;
i, cur_bytenr, bvec->bv_len, bvec->bv_offset);
cur_bytenr += bvec->bv_len;
}
btrfsic_process_written_block(dev_state, dev_bytenr,
mapped_datav, bio->bi_vcnt,
bio, &bio_is_patched,
NULL, bio->bi_opf);
while (i > 0) {
i--;
kunmap(bio->bi_io_vec[i].bv_page);
}
bio_for_each_segment_all(bvec, bio, i)
kunmap(bvec->bv_page);
kfree(mapped_datav);
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
@@ -2910,7 +2904,7 @@ int btrfsic_submit_bio_wait(struct bio *bio)
return submit_bio_wait(bio);
}
int btrfsic_mount(struct btrfs_root *root,
int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask)
{
@@ -2919,14 +2913,14 @@ int btrfsic_mount(struct btrfs_root *root,
struct list_head *dev_head = &fs_devices->devices;
struct btrfs_device *device;
if (root->nodesize & ((u64)PAGE_SIZE - 1)) {
if (fs_info->nodesize & ((u64)PAGE_SIZE - 1)) {
pr_info("btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n",
root->nodesize, PAGE_SIZE);
fs_info->nodesize, PAGE_SIZE);
return -1;
}
if (root->sectorsize & ((u64)PAGE_SIZE - 1)) {
if (fs_info->sectorsize & ((u64)PAGE_SIZE - 1)) {
pr_info("btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n",
root->sectorsize, PAGE_SIZE);
fs_info->sectorsize, PAGE_SIZE);
return -1;
}
state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
@@ -2944,12 +2938,12 @@ int btrfsic_mount(struct btrfs_root *root,
btrfsic_is_initialized = 1;
}
mutex_lock(&btrfsic_mutex);
state->root = root;
state->fs_info = fs_info;
state->print_mask = print_mask;
state->include_extent_data = including_extent_data;
state->csum_size = 0;
state->metablock_size = root->nodesize;
state->datablock_size = root->sectorsize;
state->metablock_size = fs_info->nodesize;
state->datablock_size = fs_info->sectorsize;
INIT_LIST_HEAD(&state->all_blocks_list);
btrfsic_block_hashtable_init(&state->block_hashtable);
btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
@@ -2982,7 +2976,7 @@ int btrfsic_mount(struct btrfs_root *root,
ret = btrfsic_process_superblock(state, fs_devices);
if (0 != ret) {
mutex_unlock(&btrfsic_mutex);
btrfsic_unmount(root, fs_devices);
btrfsic_unmount(fs_devices);
return ret;
}
@@ -2995,8 +2989,7 @@ int btrfsic_mount(struct btrfs_root *root,
return 0;
}
void btrfsic_unmount(struct btrfs_root *root,
struct btrfs_fs_devices *fs_devices)
void btrfsic_unmount(struct btrfs_fs_devices *fs_devices)
{
struct btrfsic_block *b_all, *tmp_all;
struct btrfsic_state *state;

View file

@@ -29,10 +29,9 @@ int btrfsic_submit_bio_wait(struct bio *bio);
#define btrfsic_submit_bio_wait submit_bio_wait
#endif
int btrfsic_mount(struct btrfs_root *root,
int btrfsic_mount(struct btrfs_fs_info *fs_info,
struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask);
void btrfsic_unmount(struct btrfs_root *root,
struct btrfs_fs_devices *fs_devices);
void btrfsic_unmount(struct btrfs_fs_devices *fs_devices);
#endif

View file

@@ -81,17 +81,17 @@ struct compressed_bio {
u32 sums;
};
static int btrfs_decompress_biovec(int type, struct page **pages_in,
u64 disk_start, struct bio_vec *bvec,
int vcnt, size_t srclen);
static int btrfs_decompress_bio(int type, struct page **pages_in,
u64 disk_start, struct bio *orig_bio,
size_t srclen);
static inline int compressed_bio_size(struct btrfs_root *root,
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
unsigned long disk_size)
{
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
return sizeof(struct compressed_bio) +
(DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
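A worked example of compressed_bio_size() above, with assumed values: a 128 KiB compressed extent, 4 KiB sectors, and the 4-byte crc32c checksum give

	/*
	 * DIV_ROUND_UP(131072, 4096) * 4 = 32 * 4 = 128 bytes
	 * of checksum space appended to the compressed_bio allocation.
	 */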
static struct bio *compressed_bio_alloc(struct block_device *bdev,
@@ -120,7 +120,7 @@ static int check_compressed_csum(struct inode *inode,
kaddr = kmap_atomic(page);
csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
btrfs_csum_final(csum, (char *)&csum);
btrfs_csum_final(csum, (u8 *)&csum);
kunmap_atomic(kaddr);
if (csum != *cb_sum) {
@@ -175,11 +175,10 @@ static void end_compressed_bio_read(struct bio *bio)
/* ok, we're the last bio for this extent, lets start
* the decompression.
*/
ret = btrfs_decompress_biovec(cb->compress_type,
ret = btrfs_decompress_bio(cb->compress_type,
cb->compressed_pages,
cb->start,
cb->orig_bio->bi_io_vec,
cb->orig_bio->bi_vcnt,
cb->orig_bio,
cb->compressed_len);
csum_failed:
if (ret)
@@ -329,8 +328,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
struct page **compressed_pages,
unsigned long nr_pages)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct compressed_bio *cb;
unsigned long bytes_left;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -342,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
WARN_ON(start & ((u64)PAGE_SIZE - 1));
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
return -ENOMEM;
atomic_set(&cb->pending_bios, 0);
@@ -356,7 +355,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
cb->orig_bio = NULL;
cb->nr_pages = nr_pages;
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
bdev = fs_info->fs_devices->latest_bdev;
bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
if (!bio) {
@@ -392,17 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
* freed before we're done setting it up
*/
atomic_inc(&cb->pending_bios);
ret = btrfs_bio_wq_end_io(root->fs_info, bio,
BTRFS_WQ_ENDIO_DATA);
ret = btrfs_bio_wq_end_io(fs_info, bio,
BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio,
start, 1);
ret = btrfs_csum_one_bio(inode, bio, start, 1);
BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, bio, 0, 1);
ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@@ -418,7 +416,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_add_page(bio, page, PAGE_SIZE, 0);
}
if (bytes_left < PAGE_SIZE) {
btrfs_info(BTRFS_I(inode)->root->fs_info,
btrfs_info(fs_info,
"bytes left %lu compress len %lu nr %lu",
bytes_left, cb->compressed_len, cb->nr_pages);
}
@@ -428,15 +426,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
}
bio_get(bio);
ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!skip_sum) {
ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
ret = btrfs_csum_one_bio(inode, bio, start, 1);
BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, bio, 0, 1);
ret = btrfs_map_bio(fs_info, bio, 0, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@@ -446,6 +444,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
return 0;
}
static u64 bio_end_offset(struct bio *bio)
{
struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];
return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}
static noinline int add_ra_bio_pages(struct inode *inode,
u64 compressed_end,
struct compressed_bio *cb)
@@ -464,8 +469,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
u64 end;
int misses = 0;
page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
last_offset = (page_offset(page) + PAGE_SIZE);
last_offset = bio_end_offset(cb->orig_bio);
em_tree = &BTRFS_I(inode)->extent_tree;
tree = &BTRFS_I(inode)->io_tree;
@@ -563,7 +567,6 @@ next:
*
* bio->bi_iter.bi_sector points to the compressed extent on disk
* bio->bi_io_vec points to all of the inode pages
* bio->bi_vcnt is a count of pages
*
* After the compressed pages are read, we copy the bytes into the
* bio we were passed and then call the bio end_io calls
@@ -571,11 +574,10 @@ next:
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_io_tree *tree;
struct extent_map_tree *em_tree;
struct compressed_bio *cb;
struct btrfs_root *root = BTRFS_I(inode)->root;
unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
unsigned long compressed_len;
unsigned long nr_pages;
unsigned long pg_index;
@@ -603,7 +605,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
return -EIO;
compressed_len = em->block_len;
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
if (!cb)
goto out;
@@ -620,7 +622,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
free_extent_map(em);
em = NULL;
cb->len = uncompressed_len;
cb->len = bio->bi_iter.bi_size;
cb->compressed_len = compressed_len;
cb->compress_type = extent_compress_type(bio_flags);
cb->orig_bio = bio;
@@ -631,7 +633,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
if (!cb->compressed_pages)
goto fail1;
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
bdev = fs_info->fs_devices->latest_bdev;
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
@@ -648,8 +650,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
add_ra_bio_pages(inode, em_start + em_len, cb);
/* include any pages we added in add_ra-bio_pages */
uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
cb->len = uncompressed_len;
cb->len = bio->bi_iter.bi_size;
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
if (!comp_bio)
@@ -676,8 +677,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
PAGE_SIZE) {
bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
BTRFS_WQ_ENDIO_DATA);
ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
/*
@@ -689,14 +690,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
atomic_inc(&cb->pending_bios);
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(root, inode,
comp_bio, sums);
ret = btrfs_lookup_bio_sums(inode, comp_bio,
sums);
BUG_ON(ret); /* -ENOMEM */
}
sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
root->sectorsize);
fs_info->sectorsize);
ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) {
comp_bio->bi_error = ret;
bio_endio(comp_bio);
@@ -717,16 +718,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
}
bio_get(comp_bio);
ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
BTRFS_WQ_ENDIO_DATA);
ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
BUG_ON(ret); /* -ENOMEM */
if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
BUG_ON(ret); /* -ENOMEM */
}
ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
if (ret) {
comp_bio->bi_error = ret;
bio_endio(comp_bio);
@@ -959,9 +959,7 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
*
* disk_start is the starting logical offset of this array in the file
*
* bvec is a bio_vec of pages from the file that we want to decompress into
*
* vcnt is the count of pages in the biovec
* orig_bio contains the pages from the file that we want to decompress into
*
* srclen is the number of bytes in pages_in
*
@@ -970,18 +968,18 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
* be contiguous. They all correspond to the range of bytes covered by
* the compressed extent.
*/
static int btrfs_decompress_biovec(int type, struct page **pages_in,
u64 disk_start, struct bio_vec *bvec,
int vcnt, size_t srclen)
static int btrfs_decompress_bio(int type, struct page **pages_in,
u64 disk_start, struct bio *orig_bio,
size_t srclen)
{
struct list_head *workspace;
int ret;
workspace = find_workspace(type);
ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
disk_start,
bvec, vcnt, srclen);
ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
disk_start, orig_bio,
srclen);
free_workspace(type, workspace);
return ret;
}
@@ -1021,9 +1019,7 @@ void btrfs_exit_compress(void)
*/
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
unsigned long *pg_index,
unsigned long *pg_offset)
struct bio *bio)
{
unsigned long buf_offset;
unsigned long current_buf_start;
@@ -1031,13 +1027,13 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long working_bytes = total_out - buf_start;
unsigned long bytes;
char *kaddr;
struct page *page_out = bvec[*pg_index].bv_page;
struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
/*
* start byte is the first byte of the page we're currently
* copying into relative to the start of the compressed data.
*/
start_byte = page_offset(page_out) - disk_start;
start_byte = page_offset(bvec.bv_page) - disk_start;
/* we haven't yet hit data corresponding to this page */
if (total_out <= start_byte)
@@ -1057,80 +1053,46 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
/* copy bytes from the working buffer into the pages */
while (working_bytes > 0) {
bytes = min(PAGE_SIZE - *pg_offset,
PAGE_SIZE - buf_offset);
bytes = min_t(unsigned long, bvec.bv_len,
PAGE_SIZE - buf_offset);
bytes = min(bytes, working_bytes);
kaddr = kmap_atomic(page_out);
memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
kunmap_atomic(kaddr);
flush_dcache_page(page_out);
*pg_offset += bytes;
kaddr = kmap_atomic(bvec.bv_page);
memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
kunmap_atomic(kaddr);
flush_dcache_page(bvec.bv_page);
buf_offset += bytes;
working_bytes -= bytes;
current_buf_start += bytes;
/* check if we need to pick another page */
if (*pg_offset == PAGE_SIZE) {
(*pg_index)++;
if (*pg_index >= vcnt)
return 0;
bio_advance(bio, bytes);
if (!bio->bi_iter.bi_size)
return 0;
bvec = bio_iter_iovec(bio, bio->bi_iter);
page_out = bvec[*pg_index].bv_page;
*pg_offset = 0;
start_byte = page_offset(page_out) - disk_start;
start_byte = page_offset(bvec.bv_page) - disk_start;
/*
* make sure our new page is covered by this
* working buffer
*/
if (total_out <= start_byte)
return 1;
/*
* make sure our new page is covered by this
* working buffer
*/
if (total_out <= start_byte)
return 1;
/*
* the next page in the biovec might not be adjacent
* to the last page, but it might still be found
* inside this working buffer. bump our offset pointer
*/
if (total_out > start_byte &&
current_buf_start < start_byte) {
buf_offset = start_byte - buf_start;
working_bytes = total_out - start_byte;
current_buf_start = buf_start + buf_offset;
}
/*
* the next page in the biovec might not be adjacent
* to the last page, but it might still be found
* inside this working buffer. bump our offset pointer
*/
if (total_out > start_byte &&
current_buf_start < start_byte) {
buf_offset = start_byte - buf_start;
working_bytes = total_out - start_byte;
current_buf_start = buf_start + buf_offset;
}
}
return 1;
}
/*
* When uncompressing data, we need to make sure and zero any parts of
* the biovec that were not filled in by the decompression code. pg_index
* and pg_offset indicate the last page and the last offset of that page
* that have been filled in. This will zero everything remaining in the
* biovec.
*/
void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
unsigned long pg_index,
unsigned long pg_offset)
{
while (pg_index < vcnt) {
struct page *page = bvec[pg_index].bv_page;
unsigned long off = bvec[pg_index].bv_offset;
unsigned long len = bvec[pg_index].bv_len;
if (pg_offset < off)
pg_offset = off;
if (pg_offset < off + len) {
unsigned long bytes = off + len - pg_offset;
char *kaddr;
kaddr = kmap_atomic(page);
memset(kaddr + pg_offset, 0, bytes);
kunmap_atomic(kaddr);
}
pg_index++;
pg_offset = 0;
}
}
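The rework above drives btrfs_decompress_buf2page() with the bio iterator instead of a (pg_index, pg_offset) pair, which removes the need to track a trailing output position and, with it, the btrfs_clear_biovec_end() helper. A stripped-down sketch of the iteration pattern under the 4.10 bio API (the function and parameter names are illustrative):

/* illustrative: copy a linear buffer into a bio, segment by segment */
static void copy_buf_to_bio(struct bio *bio, const char *buf, size_t len)
{
	while (len && bio->bi_iter.bi_size) {
		struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
		size_t bytes = min_t(size_t, len, bvec.bv_len);
		char *kaddr = kmap_atomic(bvec.bv_page);

		memcpy(kaddr + bvec.bv_offset, buf, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf += bytes;
		len -= bytes;
		bio_advance(bio, bytes);	/* step bi_iter to the next segment */
	}
}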

View file

@@ -34,9 +34,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
unsigned long start_byte, size_t srclen, size_t destlen);
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
unsigned long *pg_index,
unsigned long *pg_offset);
struct bio *bio);
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long len, u64 disk_start,
@@ -45,9 +43,6 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
unsigned long nr_pages);
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
unsigned long pg_index,
unsigned long pg_offset);
enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
@@ -72,11 +67,10 @@ struct btrfs_compress_op {
unsigned long *total_out,
unsigned long max_out);
int (*decompress_biovec)(struct list_head *workspace,
int (*decompress_bio)(struct list_head *workspace,
struct page **pages_in,
u64 disk_start,
struct bio_vec *bvec,
int vcnt,
struct bio *orig_bio,
size_t srclen);
int (*decompress)(struct list_head *workspace,

File diff suppressed because it is too large

View file

@@ -90,9 +90,6 @@ static const int btrfs_csum_sizes[] = { 4 };
/* four bytes for CRC32 */
#define BTRFS_EMPTY_DIR_SIZE 0
/* specific to btrfs_map_block(), therefore not in include/linux/blk_types.h */
#define REQ_GET_READ_MIRRORS (1 << 30)
/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
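The REQ_GET_READ_MIRRORS removal above pairs with this series' switch to a btrfs-private enum of map operations; the check-integrity.c hunk earlier already shows btrfs_map_block() taking BTRFS_MAP_READ. A sketch of the resulting caller shape (variable names illustrative):

	ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical, &length,
			      &bbio, mirror_num);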
@@ -340,7 +337,7 @@ struct btrfs_path {
unsigned int need_commit_sem:1;
unsigned int skip_release_on_error:1;
};
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \
#define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
sizeof(struct btrfs_item))
struct btrfs_dev_replace {
u64 replace_state; /* see #define above */
@@ -429,6 +426,10 @@ struct btrfs_space_info {
struct list_head ro_bgs;
struct list_head priority_tickets;
struct list_head tickets;
/*
* tickets_id just indicates the next ticket will be handled, so note
* it's not stored per ticket.
*/
u64 tickets_id;
struct rw_semaphore groups_sem;
@@ -518,7 +519,7 @@ struct btrfs_io_ctl {
void *cur, *orig;
struct page *page;
struct page **pages;
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
struct inode *inode;
unsigned long size;
int index;
@@ -798,7 +799,6 @@ struct btrfs_fs_info {
spinlock_t super_lock;
struct btrfs_super_block *super_copy;
struct btrfs_super_block *super_for_commit;
struct block_device *__bdev;
struct super_block *sb;
struct inode *btree_inode;
struct backing_dev_info bdi;
@@ -1084,8 +1084,18 @@ struct btrfs_fs_info {
/* Used to record internally whether fs has been frozen */
int fs_frozen;
/* Cached block sizes */
u32 nodesize;
u32 sectorsize;
u32 stripesize;
};
static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
return sb->s_fs_info;
}
struct btrfs_subvolume_writers {
struct percpu_counter counter;
wait_queue_head_t wait;
@@ -1159,14 +1169,6 @@ struct btrfs_root {
u64 objectid;
u64 last_trans;
/* data allocations are done in sectorsize units */
u32 sectorsize;
/* node allocations are done in nodesize units */
u32 nodesize;
u32 stripesize;
u32 type;
u64 highest_objectid;
@@ -1250,38 +1252,42 @@ struct btrfs_root {
/* For qgroup metadata space reserve */
atomic_t qgroup_meta_rsv;
};
static inline u32 btrfs_inode_sectorsize(const struct inode *inode)
{
return btrfs_sb(inode->i_sb)->sectorsize;
}
static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
{
return blocksize - sizeof(struct btrfs_header);
}
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_root *root)
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
{
return __BTRFS_LEAF_DATA_SIZE(root->nodesize);
return __BTRFS_LEAF_DATA_SIZE(info->nodesize);
}
static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_root *root)
static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_fs_info *info)
{
return BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
return BTRFS_LEAF_DATA_SIZE(info) - sizeof(struct btrfs_item);
}
static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_root *root)
static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_fs_info *info)
{
return BTRFS_LEAF_DATA_SIZE(root) / sizeof(struct btrfs_key_ptr);
return BTRFS_LEAF_DATA_SIZE(info) / sizeof(struct btrfs_key_ptr);
}
#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
(offsetof(struct btrfs_file_extent_item, disk_bytenr))
static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_root *root)
static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_fs_info *info)
{
return BTRFS_MAX_ITEM_SIZE(root) -
return BTRFS_MAX_ITEM_SIZE(info) -
BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_fs_info *info)
{
return BTRFS_MAX_ITEM_SIZE(root) - sizeof(struct btrfs_dir_item);
return BTRFS_MAX_ITEM_SIZE(info) - sizeof(struct btrfs_dir_item);
}
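Worked numbers for the size helpers above, assuming the 16 KiB default nodesize and the on-disk struct sizes of this era (header 101 bytes, item 25, key pointer 33):

	/*
	 * BTRFS_LEAF_DATA_SIZE     = 16384 - 101 = 16283 bytes
	 * BTRFS_MAX_ITEM_SIZE      = 16283 - 25  = 16258 bytes
	 * BTRFS_NODEPTRS_PER_BLOCK = 16283 / 33  = 493 pointers
	 */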
/*
@@ -1343,12 +1349,13 @@ static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
#ifdef CONFIG_BTRFS_DEBUG
static inline int
btrfs_should_fragment_free_space(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
btrfs_should_fragment_free_space(struct btrfs_block_group_cache *block_group)
{
return (btrfs_test_opt(root->fs_info, FRAGMENT_METADATA) &&
struct btrfs_fs_info *fs_info = block_group->fs_info;
return (btrfs_test_opt(fs_info, FRAGMENT_METADATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
(btrfs_test_opt(root->fs_info, FRAGMENT_DATA) &&
(btrfs_test_opt(fs_info, FRAGMENT_DATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif
@@ -2210,6 +2217,8 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
cpu->target = le64_to_cpu(disk->target);
cpu->flags = le64_to_cpu(disk->flags);
cpu->limit = le64_to_cpu(disk->limit);
cpu->stripes_min = le32_to_cpu(disk->stripes_min);
cpu->stripes_max = le32_to_cpu(disk->stripes_max);
}
static inline void
@@ -2228,6 +2237,8 @@ btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
disk->target = cpu_to_le64(cpu->target);
disk->flags = cpu_to_le64(cpu->flags);
disk->limit = cpu_to_le64(cpu->limit);
disk->stripes_min = cpu_to_le32(cpu->stripes_min);
disk->stripes_max = cpu_to_le32(cpu->stripes_max);
}
/* struct btrfs_super_block */
@@ -2299,13 +2310,13 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
* this returns the address of the start of the last item,
* which is the stop of the leaf data stack
*/
static inline unsigned int leaf_data_end(struct btrfs_root *root,
static inline unsigned int leaf_data_end(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf)
{
u32 nr = btrfs_header_nritems(leaf);
if (nr == 0)
return BTRFS_LEAF_DATA_SIZE(root);
return BTRFS_LEAF_DATA_SIZE(fs_info);
return btrfs_item_offset_nr(leaf, nr - 1);
}
@@ -2501,11 +2512,6 @@ BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_left,
BTRFS_SETGET_STACK_FUNCS(stack_dev_replace_cursor_right,
struct btrfs_dev_replace_item, cursor_right, 64);
static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
{
return sb->s_fs_info;
}
/* helper function to cast into the data area of the leaf. */
#define btrfs_item_ptr(leaf, slot, type) \
((type *)(btrfs_leaf_data(leaf) + \
@@ -2528,28 +2534,28 @@ static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
/* extent-tree.c */
u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes);
u64 btrfs_csum_bytes_to_leaves(struct btrfs_fs_info *fs_info, u64 csum_bytes);
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
{
return root->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
return fs_info->nodesize * BTRFS_MAX_LEVEL * 2 * num_items;
}
/*
* Doing a truncate won't result in new nodes or leaves, just what we need for
* COW.
*/
static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root,
static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_fs_info *fs_info,
unsigned num_items)
{
return root->nodesize * BTRFS_MAX_LEVEL * num_items;
return fs_info->nodesize * BTRFS_MAX_LEVEL * num_items;
}
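A worked example of the two reservation helpers above, assuming a 16 KiB nodesize and BTRFS_MAX_LEVEL == 8 (illustrative arithmetic, not part of the diff):

	/*
	 * btrfs_calc_trans_metadata_size(fs_info, 1) = 16384 * 8 * 2 = 256 KiB
	 * btrfs_calc_trunc_metadata_size(fs_info, 1) = 16384 * 8     = 128 KiB
	 */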
int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
const u64 start);
void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
@@ -2558,18 +2564,18 @@ void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_root *root, unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_root *root,
struct btrfs_fs_info *fs_info, unsigned long count);
int btrfs_async_run_delayed_refs(struct btrfs_fs_info *fs_info,
unsigned long count, u64 transid, int wait);
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len);
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr,
struct btrfs_fs_info *fs_info, u64 bytenr,
u64 offset, int metadata, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
int btrfs_pin_extent(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num, int reserved);
int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes);
int btrfs_exclude_logged_extents(struct btrfs_root *root,
int btrfs_exclude_logged_extents(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -2590,12 +2596,11 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
struct extent_buffer *buf,
u64 parent, int last_ref);
int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 root_objectid, u64 owner,
u64 offset, u64 ram_bytes,
struct btrfs_key *ins);
int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 root_objectid, u64 owner, u64 offset,
struct btrfs_key *ins);
int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, u64 num_bytes,
@@ -2606,52 +2611,52 @@ int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf, int full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 flags,
int level, int is_data);
int btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
u64 owner, u64 offset);
int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
int delalloc);
int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len, int delalloc);
int btrfs_free_and_pin_reserved_extent(struct btrfs_fs_info *fs_info,
u64 start, u64 len);
void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
u64 bytenr, u64 num_bytes, u64 parent,
u64 root_objectid, u64 owner, u64 offset);
int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr);
struct btrfs_fs_info *fs_info);
int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_free_block_groups(struct btrfs_fs_info *info);
int btrfs_read_block_groups(struct btrfs_root *root);
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr);
int btrfs_read_block_groups(struct btrfs_fs_info *info);
int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytes_used,
struct btrfs_fs_info *fs_info, u64 bytes_used,
u64 type, u64 chunk_objectid, u64 chunk_offset,
u64 size);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 group_start,
struct btrfs_fs_info *fs_info, u64 group_start,
struct extent_map *em);
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
@@ -2681,7 +2686,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
u64 len);
void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans);
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode);
@@ -2690,7 +2695,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
struct btrfs_block_rsv *rsv,
int nitems,
u64 *qgroup_reserved, bool use_global_rsv);
void btrfs_subvolume_release_metadata(struct btrfs_root *root,
void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv,
u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
@@ -2698,16 +2703,15 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len);
void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
unsigned short type);
void btrfs_free_block_rsv(struct btrfs_root *root,
void btrfs_free_block_rsv(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 num_bytes,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_check(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_check(struct btrfs_block_rsv *block_rsv, int min_factor);
int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 min_reserved,
enum btrfs_reserve_flush_enum flush);
@@ -2717,22 +2721,21 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *dest, u64 num_bytes,
int min_factor);
void btrfs_block_rsv_release(struct btrfs_root *root,
void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
int btrfs_inc_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_root *root,
struct btrfs_block_group_cache *cache);
void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
int btrfs_error_unpin_extent_range(struct btrfs_root *root,
int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
u64 num_bytes, u64 *actual_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type);
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
struct btrfs_fs_info *fs_info, u64 type);
int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range);
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
@@ -2742,8 +2745,7 @@ int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
void check_system_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const u64 type);
struct btrfs_fs_info *fs_info, const u64 type);
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_fs_info *info, u64 start, u64 end);
@@ -2793,10 +2795,10 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
struct extent_buffer **cow_ret, u64 new_root_objectid);
int btrfs_block_can_be_shared(struct btrfs_root *root,
struct extent_buffer *buf);
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
u32 data_size);
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
u32 new_size, int from_end);
void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u32 new_size, int from_end);
int btrfs_split_item(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
@@ -2872,7 +2874,8 @@ static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
{
return btrfs_next_old_item(root, p, 0);
}
int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf);
int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
int update_ref, int for_reloc);
@@ -2898,10 +2901,9 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
* anything except sleeping. This function is used to check the status of
* the fs.
*/
static inline int btrfs_need_cleaner_sleep(struct btrfs_root *root)
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
return (root->fs_info->sb->s_flags & MS_RDONLY ||
btrfs_fs_closing(root->fs_info));
return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
}
static inline void free_fs_info(struct btrfs_fs_info *fs_info)
@@ -2931,11 +2933,11 @@ int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
struct btrfs_fs_info *fs_info,
u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
const char *name, int name_len);
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
struct btrfs_fs_info *fs_info,
u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
const char *name, int name_len);
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2950,7 +2952,7 @@ int __must_check btrfs_update_root(struct btrfs_trans_handle *trans,
int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
struct btrfs_path *path, struct btrfs_root_item *root_item,
struct btrfs_key *root_key);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info);
void btrfs_set_root_node(struct btrfs_root_item *item,
struct extent_buffer *node);
void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
@@ -2959,10 +2961,10 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans,
/* uuid-tree.c */
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
struct btrfs_root *uuid_root, u8 *uuid, u8 type,
struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
u64 subid);
int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
struct btrfs_root *uuid_root, u8 *uuid, u8 type,
struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
u64 subid);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
int (*check_func)(struct btrfs_fs_info *, u8 *, u8,
@@ -3004,10 +3006,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 dir,
const char *name, u16 name_len,
int mod);
int verify_dir_item(struct btrfs_root *root,
int verify_dir_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item);
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name,
int name_len);
@@ -3051,11 +3053,10 @@ int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
/* file-item.c */
struct btrfs_dio_private;
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 len);
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 logical_offset);
struct btrfs_fs_info *fs_info, u64 bytenr, u64 len);
int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst);
int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio,
u64 logical_offset);
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 pos,
@@ -3069,8 +3070,8 @@ int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig);
int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
u64 file_start, int contig);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit);
void btrfs_extent_item_to_extent_map(struct inode *inode,
@@ -3173,7 +3174,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
void btrfs_invalidate_inodes(struct btrfs_root *root);
void btrfs_add_delayed_iput(struct inode *inode);
void btrfs_run_delayed_iputs(struct btrfs_root *root);
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info);
int btrfs_prealloc_file_range(struct inode *inode, int mode,
u64 start, u64 num_bytes, u64 min_size,
loff_t actual_len, u64 *alloc_hint);
@@ -3227,9 +3228,8 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
struct page **pages, size_t num_pages,
loff_t pos, size_t write_bytes,
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached);
int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
ssize_t btrfs_copy_file_range(struct file *file_in, loff_t pos_in,
@@ -3252,7 +3252,7 @@ void btrfs_sysfs_remove_mounted(struct btrfs_fs_info *fs_info);
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
/* super.c */
int btrfs_parse_options(struct btrfs_root *root, char *options,
int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags);
int btrfs_sync_fs(struct super_block *sb, int wait);
@@ -3445,9 +3445,14 @@ do { \
/* Report first abort since mount */ \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
&((trans)->fs_info->fs_state))) { \
WARN(1, KERN_DEBUG \
"BTRFS: Transaction aborted (error %d)\n", \
(errno)); \
if ((errno) != -EIO) { \
WARN(1, KERN_DEBUG \
"BTRFS: Transaction aborted (error %d)\n", \
(errno)); \
} else { \
pr_debug("BTRFS: Transaction aborted (error %d)\n", \
(errno)); \
} \
} \
__btrfs_abort_transaction((trans), __func__, \
__LINE__, (errno)); \
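The branch added above only quiets -EIO; any other errno still trips the WARN. Callers are unchanged, and a typical abort site looks roughly like this sketch (assuming this macro is the usual btrfs_abort_transaction() wrapper around __btrfs_abort_transaction()):

	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);	/* -EIO now logs quietly */
		goto out;
	}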
@@ -3609,7 +3614,7 @@ static inline int btrfs_init_acl(struct btrfs_trans_handle *trans,
#endif
/* relocation.c */
int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start);
int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start);
int btrfs_init_reloc_root(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
@@ -3628,12 +3633,12 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
u64 end, struct btrfs_scrub_progress *progress,
int readonly, int is_dev_replace);
void btrfs_scrub_pause(struct btrfs_root *root);
void btrfs_scrub_continue(struct btrfs_root *root);
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info);
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info);
int btrfs_scrub_cancel(struct btrfs_fs_info *info);
int btrfs_scrub_cancel_dev(struct btrfs_fs_info *info,
struct btrfs_device *dev);
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress);
/* dev-replace.c */
@@ -3648,7 +3653,7 @@ static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
/* reada.c */
struct reada_control {
struct btrfs_root *root; /* tree to prefetch */
struct btrfs_fs_info *fs_info; /* tree to prefetch */
struct btrfs_key key_start;
struct btrfs_key key_end; /* exclusive */
atomic_t elems;
@@ -3660,7 +3665,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
int btrfs_reada_wait(void *handle);
void btrfs_reada_detach(void *handle);
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, u64 start, int err);
struct extent_buffer *eb, int err);
static inline int is_fstree(u64 rootid)
{

View file

@@ -72,12 +72,6 @@ static inline int btrfs_is_continuous_delayed_item(
return 0;
}
static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
struct btrfs_root *root)
{
return root->fs_info->delayed_root;
}
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
@@ -535,7 +529,7 @@ static struct btrfs_delayed_item *__btrfs_next_delayed_item(
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_item *item)
{
struct btrfs_block_rsv *src_rsv;
@@ -547,12 +541,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
return 0;
src_rsv = trans->block_rsv;
dst_rsv = &root->fs_info->delayed_block_rsv;
dst_rsv = &fs_info->delayed_block_rsv;
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
if (!ret) {
trace_btrfs_space_reservation(root->fs_info, "delayed_item",
trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid,
num_bytes, 1);
item->bytes_reserved = num_bytes;
@@ -561,7 +555,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_item *item)
{
struct btrfs_block_rsv *rsv;
@@ -569,11 +563,11 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
if (!item->bytes_reserved)
return;
rsv = &root->fs_info->delayed_block_rsv;
trace_btrfs_space_reservation(root->fs_info, "delayed_item",
rsv = &fs_info->delayed_block_rsv;
trace_btrfs_space_reservation(fs_info, "delayed_item",
item->key.objectid, item->bytes_reserved,
0);
btrfs_block_rsv_release(root, rsv,
btrfs_block_rsv_release(fs_info, rsv,
item->bytes_reserved);
}
@ -583,6 +577,7 @@ static int btrfs_delayed_inode_reserve_metadata(
struct inode *inode,
struct btrfs_delayed_node *node)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_block_rsv *src_rsv;
struct btrfs_block_rsv *dst_rsv;
u64 num_bytes;
@ -590,9 +585,9 @@ static int btrfs_delayed_inode_reserve_metadata(
bool release = false;
src_rsv = trans->block_rsv;
dst_rsv = &root->fs_info->delayed_block_rsv;
dst_rsv = &fs_info->delayed_block_rsv;
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
/*
* If our block_rsv is the delalloc block reserve then check and see if
@ -640,7 +635,7 @@ static int btrfs_delayed_inode_reserve_metadata(
ret = -ENOSPC;
if (!ret) {
node->bytes_reserved = num_bytes;
trace_btrfs_space_reservation(root->fs_info,
trace_btrfs_space_reservation(fs_info,
"delayed_inode",
btrfs_ino(inode),
num_bytes, 1);
@ -664,21 +659,21 @@ static int btrfs_delayed_inode_reserve_metadata(
* how block rsvs work.
*/
if (!ret) {
trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
trace_btrfs_space_reservation(fs_info, "delayed_inode",
btrfs_ino(inode), num_bytes, 1);
node->bytes_reserved = num_bytes;
}
if (release) {
trace_btrfs_space_reservation(root->fs_info, "delalloc",
trace_btrfs_space_reservation(fs_info, "delalloc",
btrfs_ino(inode), num_bytes, 0);
btrfs_block_rsv_release(root, src_rsv, num_bytes);
btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
}
return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_node *node)
{
struct btrfs_block_rsv *rsv;
@ -686,10 +681,10 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
if (!node->bytes_reserved)
return;
rsv = &root->fs_info->delayed_block_rsv;
trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
rsv = &fs_info->delayed_block_rsv;
trace_btrfs_space_reservation(fs_info, "delayed_inode",
node->inode_id, node->bytes_reserved, 0);
btrfs_block_rsv_release(root, rsv,
btrfs_block_rsv_release(fs_info, rsv,
node->bytes_reserved);
node->bytes_reserved = 0;
}
@ -702,6 +697,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
int free_space;
int total_data_size = 0, total_size = 0;
@ -718,7 +714,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
BUG_ON(!path->nodes[0]);
leaf = path->nodes[0];
free_space = btrfs_leaf_free_space(root, leaf);
free_space = btrfs_leaf_free_space(fs_info, leaf);
INIT_LIST_HEAD(&head);
next = item;
@ -791,7 +787,7 @@ static int btrfs_batch_insert_items(struct btrfs_root *root,
curr->data_len);
slot++;
btrfs_delayed_item_release_metadata(root, curr);
btrfs_delayed_item_release_metadata(fs_info, curr);
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
@ -813,6 +809,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_item *delayed_item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
char *ptr;
int ret;
@ -830,7 +827,7 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
delayed_item->data_len);
btrfs_mark_buffer_dirty(leaf);
btrfs_delayed_item_release_metadata(root, delayed_item);
btrfs_delayed_item_release_metadata(fs_info, delayed_item);
return 0;
}
@ -882,6 +879,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_item *item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr, *next;
struct extent_buffer *leaf;
struct btrfs_key key;
@ -931,7 +929,7 @@ static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
goto out;
list_for_each_entry_safe(curr, next, &head, tree_list) {
btrfs_delayed_item_release_metadata(root, curr);
btrfs_delayed_item_release_metadata(fs_info, curr);
list_del(&curr->tree_list);
btrfs_release_delayed_item(curr);
}
@ -1017,6 +1015,7 @@ static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
struct btrfs_delayed_node *node)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
@ -1073,7 +1072,7 @@ out:
no_iref:
btrfs_release_path(path);
err_out:
btrfs_delayed_inode_release_metadata(root, node);
btrfs_delayed_inode_release_metadata(fs_info, node);
btrfs_release_delayed_inode(node);
return ret;
@ -1138,7 +1137,7 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
* outstanding delayed items cleaned up.
*/
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr)
struct btrfs_fs_info *fs_info, int nr)
{
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
@ -1156,9 +1155,9 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
path->leave_spinning = 1;
block_rsv = trans->block_rsv;
trans->block_rsv = &root->fs_info->delayed_block_rsv;
trans->block_rsv = &fs_info->delayed_block_rsv;
delayed_root = btrfs_get_delayed_root(root);
delayed_root = fs_info->delayed_root;
curr_node = btrfs_first_delayed_node(delayed_root);
while (curr_node && (!count || (count && nr--))) {
@ -1185,15 +1184,15 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
struct btrfs_fs_info *fs_info)
{
return __btrfs_run_delayed_items(trans, root, -1);
return __btrfs_run_delayed_items(trans, fs_info, -1);
}
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr)
struct btrfs_fs_info *fs_info, int nr)
{
return __btrfs_run_delayed_items(trans, root, nr);
return __btrfs_run_delayed_items(trans, fs_info, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
@ -1236,6 +1235,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_trans_handle *trans;
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
struct btrfs_path *path;
@ -1267,7 +1267,7 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
path->leave_spinning = 1;
block_rsv = trans->block_rsv;
trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
trans->block_rsv = &fs_info->delayed_block_rsv;
mutex_lock(&delayed_node->mutex);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
@ -1280,8 +1280,8 @@ int btrfs_commit_inode_delayed_inode(struct inode *inode)
btrfs_free_path(path);
trans->block_rsv = block_rsv;
trans_out:
btrfs_end_transaction(trans, delayed_node->root);
btrfs_btree_balance_dirty(delayed_node->root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out:
btrfs_release_delayed_node(delayed_node);
@ -1345,15 +1345,16 @@ again:
__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
trans->block_rsv = block_rsv;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty_nodelay(root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty_nodelay(root->fs_info);
release_path:
btrfs_release_path(path);
total_done++;
btrfs_release_prepared_delayed_node(delayed_node);
if (async_work->nr == 0 || total_done < async_work->nr)
if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
total_done < async_work->nr)
goto again;
free_path:
@ -1369,7 +1370,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
{
struct btrfs_async_delayed_work *async_work;
if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
btrfs_workqueue_normal_congested(fs_info->delayed_workers))
return 0;
async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
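
Two throttles now guard this path, matching the "limit async_work allocation and worker func duration" fix in this pull: the worker loop above stops after a bounded number of nodes when no explicit count was requested, and no new async work is queued while the delayed_workers queue is already congested. Condensed into one predicate, the gate assembled from the two hunks above looks like this (sketch only; the kernel keeps it inline):

	/* sketch: should we kick another async flush of delayed nodes? */
	static bool should_queue_delayed_flush(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_root *delayed_root)
	{
		if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
			return false;	/* backlog too small to bother */
		if (btrfs_workqueue_normal_congested(fs_info->delayed_workers))
			return false;	/* workers saturated; let them drain */
		return true;
	}
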
@ -1385,11 +1387,9 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
return 0;
}
void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
struct btrfs_delayed_root *delayed_root;
delayed_root = btrfs_get_delayed_root(root);
WARN_ON(btrfs_first_delayed_node(delayed_root));
WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
@ -1405,12 +1405,9 @@ static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
return 0;
}
void btrfs_balance_delayed_items(struct btrfs_root *root)
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
struct btrfs_delayed_root *delayed_root;
struct btrfs_fs_info *fs_info = root->fs_info;
delayed_root = btrfs_get_delayed_root(root);
struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
return;
@ -1435,8 +1432,9 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
int name_len, struct inode *dir,
struct btrfs_fs_info *fs_info,
const char *name, int name_len,
struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index)
{
@ -1467,7 +1465,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
btrfs_set_stack_dir_type(dir_item, type);
memcpy((char *)(dir_item + 1), name, name_len);
ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
/*
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible
@ -1478,7 +1476,7 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&delayed_node->mutex);
ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
if (unlikely(ret)) {
btrfs_err(root->fs_info,
btrfs_err(fs_info,
"err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
name_len, name, delayed_node->root->objectid,
delayed_node->inode_id, ret);
@ -1491,7 +1489,7 @@ release_node:
return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_node *node,
struct btrfs_key *key)
{
@ -1504,15 +1502,15 @@ static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
return 1;
}
btrfs_delayed_item_release_metadata(root, item);
btrfs_delayed_item_release_metadata(fs_info, item);
btrfs_release_delayed_item(item);
mutex_unlock(&node->mutex);
return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *dir,
u64 index)
struct btrfs_fs_info *fs_info,
struct inode *dir, u64 index)
{
struct btrfs_delayed_node *node;
struct btrfs_delayed_item *item;
@ -1527,7 +1525,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
item_key.type = BTRFS_DIR_INDEX_KEY;
item_key.offset = index;
ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
if (!ret)
goto end;
@ -1539,7 +1537,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
item->key = item_key;
ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
/*
* we have reserved enough space when we start a new transaction,
* so reserving metadata failure is impossible.
@ -1549,7 +1547,7 @@ int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
mutex_lock(&node->mutex);
ret = __btrfs_add_delayed_deletion_item(node, item);
if (unlikely(ret)) {
btrfs_err(root->fs_info,
btrfs_err(fs_info,
"err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
index, node->root->objectid, node->inode_id, ret);
BUG();
@ -1686,7 +1684,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list,
*
*/
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
struct list_head *ins_list, bool *emitted)
struct list_head *ins_list)
{
struct btrfs_dir_item *di;
struct btrfs_delayed_item *curr, *next;
@ -1730,7 +1728,6 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
if (over)
return 1;
*emitted = true;
}
return 0;
}
@ -1861,6 +1858,7 @@ release_node:
int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_delayed_node *delayed_node;
/*
@ -1868,8 +1866,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
* leads to enospc problems. This means we also can't do
* delayed inode refs
*/
if (test_bit(BTRFS_FS_LOG_RECOVERING,
&BTRFS_I(inode)->root->fs_info->flags))
if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
return -EAGAIN;
delayed_node = btrfs_get_or_create_delayed_node(inode);
@ -1896,7 +1893,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
delayed_node->count++;
atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
atomic_inc(&fs_info->delayed_root->items);
release_node:
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node);
@ -1906,12 +1903,13 @@ release_node:
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
struct btrfs_root *root = delayed_node->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_delayed_item *curr_item, *prev_item;
mutex_lock(&delayed_node->mutex);
curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
while (curr_item) {
btrfs_delayed_item_release_metadata(root, curr_item);
btrfs_delayed_item_release_metadata(fs_info, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
@ -1919,7 +1917,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
while (curr_item) {
btrfs_delayed_item_release_metadata(root, curr_item);
btrfs_delayed_item_release_metadata(fs_info, curr_item);
prev_item = curr_item;
curr_item = __btrfs_next_delayed_item(prev_item);
btrfs_release_delayed_item(prev_item);
@ -1929,7 +1927,7 @@ static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
btrfs_release_delayed_iref(delayed_node);
if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
btrfs_delayed_inode_release_metadata(root, delayed_node);
btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
btrfs_release_delayed_inode(delayed_node);
}
mutex_unlock(&delayed_node->mutex);
@ -1976,14 +1974,11 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
}
}
void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
delayed_root = btrfs_get_delayed_root(root);
curr_node = btrfs_first_delayed_node(delayed_root);
curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
while (curr_node) {
__btrfs_kill_delayed_node(curr_node);

View file

@ -99,23 +99,24 @@ static inline void btrfs_init_delayed_root(
}
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, const char *name,
int name_len, struct inode *dir,
struct btrfs_fs_info *fs_info,
const char *name, int name_len,
struct inode *dir,
struct btrfs_disk_key *disk_key, u8 type,
u64 index);
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *dir,
u64 index);
struct btrfs_fs_info *fs_info,
struct inode *dir, u64 index);
int btrfs_inode_delayed_dir_index_count(struct inode *inode);
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr);
struct btrfs_fs_info *fs_info, int nr);
void btrfs_balance_delayed_items(struct btrfs_root *root);
void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info);
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
struct inode *inode);
@ -134,7 +135,7 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode);
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
/* Used for clean the transaction */
void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info);
/* Used for readdir() */
bool btrfs_readdir_get_delayed_items(struct inode *inode,
@ -146,13 +147,13 @@ void btrfs_readdir_put_delayed_items(struct inode *inode,
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
struct list_head *ins_list, bool *emitted);
struct list_head *ins_list);
/* for init */
int __init btrfs_delayed_inode_init(void);
void btrfs_delayed_inode_exit(void);
/* for debugging */
void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info);
#endif

View file

@ -189,6 +189,8 @@ static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
} else {
assert_spin_locked(&head->lock);
list_del(&ref->list);
if (!list_empty(&ref->add_list))
list_del(&ref->add_list);
}
ref->in_tree = 0;
btrfs_put_delayed_ref(ref);
@ -431,6 +433,15 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
exist->action = ref->action;
mod = -exist->ref_mod;
exist->ref_mod = ref->ref_mod;
if (ref->action == BTRFS_ADD_DELAYED_REF)
list_add_tail(&exist->add_list,
&href->ref_add_list);
else if (ref->action == BTRFS_DROP_DELAYED_REF) {
ASSERT(!list_empty(&exist->add_list));
list_del(&exist->add_list);
} else {
ASSERT(0);
}
} else
mod = -ref->ref_mod;
}
@ -444,6 +455,8 @@ add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
add_tail:
list_add_tail(&ref->list, &href->ref_list);
if (ref->action == BTRFS_ADD_DELAYED_REF)
list_add_tail(&ref->add_list, &href->ref_add_list);
atomic_inc(&root->num_entries);
trans->delayed_ref_updates++;
spin_unlock(&href->lock);
@ -590,6 +603,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
head_ref->must_insert_reserved = must_insert_reserved;
head_ref->is_data = is_data;
INIT_LIST_HEAD(&head_ref->ref_list);
INIT_LIST_HEAD(&head_ref->ref_add_list);
head_ref->processing = 0;
head_ref->total_ref_mod = count_mod;
head_ref->qgroup_reserved = 0;
@ -606,7 +620,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
qrecord->num_bytes = num_bytes;
qrecord->old_roots = NULL;
if(btrfs_qgroup_insert_dirty_extent_nolock(fs_info,
if(btrfs_qgroup_trace_extent_nolock(fs_info,
delayed_refs, qrecord))
kfree(qrecord);
}
@ -671,6 +685,8 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
INIT_LIST_HEAD(&ref->list);
INIT_LIST_HEAD(&ref->add_list);
full_ref = btrfs_delayed_node_to_tree_ref(ref);
full_ref->parent = parent;
@ -726,6 +742,8 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
ref->is_head = 0;
ref->in_tree = 1;
ref->seq = seq;
INIT_LIST_HEAD(&ref->list);
INIT_LIST_HEAD(&ref->add_list);
full_ref = btrfs_delayed_node_to_data_ref(ref);
full_ref->parent = parent;

View file

@ -34,14 +34,14 @@
* ref_head. Must clean this mess up later.
*/
struct btrfs_delayed_ref_node {
/*
* ref_head use rb tree, stored in ref_root->href.
* indexed by bytenr
*/
struct rb_node rb_node;
/*data/tree ref use list, stored in ref_head->ref_list. */
struct list_head list;
/*
* If action is BTRFS_ADD_DELAYED_REF, also link this node to
* ref_head->ref_add_list, then we do not need to iterate the
* whole ref_head->ref_list to find BTRFS_ADD_DELAYED_REF nodes.
*/
struct list_head add_list;
/* the starting bytenr of the extent */
u64 bytenr;
@ -99,6 +99,8 @@ struct btrfs_delayed_ref_head {
spinlock_t lock;
struct list_head ref_list;
/* accumulate add BTRFS_ADD_DELAYED_REF nodes to this ref_add_list. */
struct list_head ref_add_list;
struct rb_node href_node;
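
The payoff of ref_add_list is exactly what the comment says: a consumer looking for an insertable ref no longer walks the whole ref_list. A hedged sketch of the lookup this enables (the real selection logic lives in extent-tree.c, which is not among the hunks shown here):

	/* sketch: first pending ADD ref, O(1) instead of a full list walk */
	static struct btrfs_delayed_ref_node *
	first_add_ref(struct btrfs_delayed_ref_head *head)
	{
		if (list_empty(&head->ref_add_list))
			return NULL;
		return list_first_entry(&head->ref_add_list,
					struct btrfs_delayed_ref_node,
					add_list);
	}
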

View file

@ -142,7 +142,7 @@ no_valid_dev_replace_entry_found:
* missing
*/
if (!dev_replace->srcdev &&
!btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
!btrfs_test_opt(fs_info, DEGRADED)) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@ -151,7 +151,7 @@ no_valid_dev_replace_entry_found:
src_devid);
}
if (!dev_replace->tgtdev &&
!btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
!btrfs_test_opt(fs_info, DEGRADED)) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@ -304,11 +304,11 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info)
dev_replace->cursor_left_last_write_of_item;
}
int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
u64 srcdevid, char *srcdev_name, int read_src)
{
struct btrfs_root *root = fs_info->dev_root;
struct btrfs_trans_handle *trans;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
int ret;
struct btrfs_device *tgt_device = NULL;
@ -316,14 +316,14 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
/* the disk copy procedure reuses the scrub code */
mutex_lock(&fs_info->volume_mutex);
ret = btrfs_find_device_by_devspec(root, srcdevid,
ret = btrfs_find_device_by_devspec(fs_info, srcdevid,
srcdev_name, &src_device);
if (ret) {
mutex_unlock(&fs_info->volume_mutex);
return ret;
}
ret = btrfs_init_dev_replace_tgtdev(root, tgtdev_name,
ret = btrfs_init_dev_replace_tgtdev(fs_info, tgtdev_name,
src_device, &tgt_device);
mutex_unlock(&fs_info->volume_mutex);
if (ret)
@ -335,7 +335,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
*/
trans = btrfs_attach_transaction(root);
if (!IS_ERR(trans)) {
ret = btrfs_commit_transaction(trans, root);
ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
} else if (PTR_ERR(trans) != -ENOENT) {
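
This hunk shows the series' most common mechanical change: btrfs_commit_transaction() and btrfs_end_transaction() drop their root argument, because the transaction handle already records which filesystem it belongs to. The before/after shape, for reference:

	/* before: callers passed a root alongside the handle */
	ret = btrfs_commit_transaction(trans, root);

	/* after: the handle carries fs_info, so the extra argument is gone */
	ret = btrfs_commit_transaction(trans);
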
@ -387,7 +387,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
if (ret)
btrfs_err(fs_info, "kobj add dev failed %d", ret);
btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
/* force writing the updated state information to disk */
trans = btrfs_start_transaction(root, 0);
@ -397,7 +397,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
goto leave;
}
ret = btrfs_commit_transaction(trans, root);
ret = btrfs_commit_transaction(trans);
WARN_ON(ret);
/* the disk copy procedure reuses the scrub code */
@ -422,7 +422,7 @@ leave:
return ret;
}
int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args)
{
int ret;
@ -439,7 +439,7 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
args->start.tgtdev_name[0] == '\0')
return -EINVAL;
ret = btrfs_dev_replace_start(root, args->start.tgtdev_name,
ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
args->start.srcdevid,
args->start.srcdev_name,
args->start.cont_reading_from_srcdev_mode);
@ -501,25 +501,25 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* flush all outstanding I/O and inode extent mappings before the
* copy operation is declared as being finished
*/
ret = btrfs_start_delalloc_roots(root->fs_info, 0, -1);
ret = btrfs_start_delalloc_roots(fs_info, 0, -1);
if (ret) {
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return ret;
}
btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1);
btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return PTR_ERR(trans);
}
ret = btrfs_commit_transaction(trans, root);
ret = btrfs_commit_transaction(trans);
WARN_ON(ret);
mutex_lock(&uuid_mutex);
/* keep away write_all_supers() during the finishing procedure */
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
mutex_lock(&root->fs_info->chunk_mutex);
mutex_lock(&fs_info->fs_devices->device_list_mutex);
mutex_lock(&fs_info->chunk_mutex);
btrfs_dev_replace_lock(dev_replace, 1);
dev_replace->replace_state =
scrub_ret ? BTRFS_IOCTL_DEV_REPLACE_STATE_CANCELED
@ -535,15 +535,15 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
src_device,
tgt_device);
} else {
btrfs_err_in_rcu(root->fs_info,
"btrfs_scrub_dev(%s, %llu, %s) failed %d",
src_device->missing ? "<missing disk>" :
rcu_str_deref(src_device->name),
src_device->devid,
rcu_str_deref(tgt_device->name), scrub_ret);
btrfs_err_in_rcu(fs_info,
"btrfs_scrub_dev(%s, %llu, %s) failed %d",
src_device->missing ? "<missing disk>" :
rcu_str_deref(src_device->name),
src_device->devid,
rcu_str_deref(tgt_device->name), scrub_ret);
btrfs_dev_replace_unlock(dev_replace, 1);
mutex_unlock(&root->fs_info->chunk_mutex);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
mutex_unlock(&uuid_mutex);
if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
@ -552,12 +552,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
return scrub_ret;
}
btrfs_info_in_rcu(root->fs_info,
"dev_replace from %s (devid %llu) to %s finished",
src_device->missing ? "<missing disk>" :
rcu_str_deref(src_device->name),
src_device->devid,
rcu_str_deref(tgt_device->name));
btrfs_info_in_rcu(fs_info,
"dev_replace from %s (devid %llu) to %s finished",
src_device->missing ? "<missing disk>" :
rcu_str_deref(src_device->name),
src_device->devid,
rcu_str_deref(tgt_device->name));
tgt_device->is_tgtdev_for_dev_replace = 0;
tgt_device->devid = src_device->devid;
src_device->devid = BTRFS_DEV_REPLACE_DEVID;
@ -592,8 +592,8 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* superblock is scratched out so that it is no longer marked to
* belong to this filesystem.
*/
mutex_unlock(&root->fs_info->chunk_mutex);
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
mutex_unlock(&fs_info->chunk_mutex);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
mutex_unlock(&uuid_mutex);
/* replace the sysfs entry */
@ -603,7 +603,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
/* write back the superblocks */
trans = btrfs_start_transaction(root, 0);
if (!IS_ERR(trans))
btrfs_commit_transaction(trans, root);
btrfs_commit_transaction(trans);
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
@ -718,7 +718,7 @@ static u64 __btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
return PTR_ERR(trans);
}
ret = btrfs_commit_transaction(trans, root);
ret = btrfs_commit_transaction(trans);
WARN_ON(ret);
if (tgt_device)
btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);

View file

@ -25,9 +25,9 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info);
int btrfs_run_dev_replace(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info);
int btrfs_dev_replace_by_ioctl(struct btrfs_root *root,
int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);
int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name,
u64 srcdevid, char *srcdev_name, int read_src);
void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_dev_replace_args *args);

View file

@ -38,6 +38,7 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
const char *name,
int name_len)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *ptr;
struct btrfs_item *item;
@ -46,10 +47,10 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
if (ret == -EEXIST) {
struct btrfs_dir_item *di;
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (di)
return ERR_PTR(-EEXIST);
btrfs_extend_item(root, path, data_size);
btrfs_extend_item(fs_info, path, data_size);
} else if (ret < 0)
return ERR_PTR(ret);
WARN_ON(ret > 0);
@ -79,7 +80,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
u32 data_size;
BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root));
BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info));
key.objectid = objectid;
key.type = BTRFS_XATTR_ITEM_KEY;
@ -172,8 +173,9 @@ second_insert:
}
btrfs_release_path(path);
ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
&disk_key, type, index);
ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name,
name_len, dir, &disk_key, type,
index);
out_free:
btrfs_free_path(path);
if (ret)
@ -210,7 +212,7 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
@ -246,7 +248,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
}
/* we found an item, look for our name in the item */
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
if (di) {
/* our exact name was found */
ret = -EEXIST;
@ -261,7 +263,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
leaf = path->nodes[0];
slot = path->slots[0];
if (data_size + btrfs_item_size_nr(leaf, slot) +
sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root)) {
sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root->fs_info)) {
ret = -EOVERFLOW;
} else {
/* plenty of insertion room */
@ -301,7 +303,7 @@ btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
return ERR_PTR(ret);
if (ret > 0)
return ERR_PTR(-ENOENT);
return btrfs_match_dir_item_name(root, path, name, name_len);
return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
struct btrfs_dir_item *
@ -342,7 +344,8 @@ btrfs_search_dir_index_item(struct btrfs_root *root,
if (key.objectid != dirid || key.type != BTRFS_DIR_INDEX_KEY)
break;
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(root->fs_info, path,
name, name_len);
if (di)
return di;
@ -371,7 +374,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
if (ret > 0)
return NULL;
return btrfs_match_dir_item_name(root, path, name, name_len);
return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
}
/*
@ -379,7 +382,7 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
* this walks through all the entries in a dir item and finds one
* for a specific name.
*/
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
const char *name, int name_len)
{
@ -392,7 +395,7 @@ struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
leaf = path->nodes[0];
dir_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
if (verify_dir_item(root, leaf, dir_item))
if (verify_dir_item(fs_info, leaf, dir_item))
return NULL;
total_len = btrfs_item_size_nr(leaf, path->slots[0]);
@ -442,12 +445,13 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_len - (ptr + sub_item_len - start));
btrfs_truncate_item(root, path, item_len - sub_item_len, 1);
btrfs_truncate_item(root->fs_info, path,
item_len - sub_item_len, 1);
}
return ret;
}
int verify_dir_item(struct btrfs_root *root,
int verify_dir_item(struct btrfs_fs_info *fs_info,
struct extent_buffer *leaf,
struct btrfs_dir_item *dir_item)
{
@ -455,8 +459,7 @@ int verify_dir_item(struct btrfs_root *root,
u8 type = btrfs_dir_type(leaf, dir_item);
if (type >= BTRFS_FT_MAX) {
btrfs_crit(root->fs_info, "invalid dir item type: %d",
(int)type);
btrfs_crit(fs_info, "invalid dir item type: %d", (int)type);
return 1;
}
@ -464,16 +467,16 @@ int verify_dir_item(struct btrfs_root *root,
namelen = XATTR_NAME_MAX;
if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
btrfs_crit(root->fs_info, "invalid dir item name len: %u",
btrfs_crit(fs_info, "invalid dir item name len: %u",
(unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1;
}
/* BTRFS_MAX_XATTR_SIZE is the same for all dir items */
if ((btrfs_dir_data_len(leaf, dir_item) +
btrfs_dir_name_len(leaf, dir_item)) > BTRFS_MAX_XATTR_SIZE(root)) {
btrfs_crit(root->fs_info,
"invalid dir item name + data len: %u + %u",
btrfs_dir_name_len(leaf, dir_item)) >
BTRFS_MAX_XATTR_SIZE(fs_info)) {
btrfs_crit(fs_info, "invalid dir item name + data len: %u + %u",
(unsigned)btrfs_dir_name_len(leaf, dir_item),
(unsigned)btrfs_dir_data_len(leaf, dir_item));
return 1;

The diff for this file is not shown because of its large size.

View file

@ -44,27 +44,26 @@ static inline u64 btrfs_sb_offset(int mirror)
struct btrfs_device;
struct btrfs_fs_devices;
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
u64 parent_transid);
void readahead_tree_block(struct btrfs_root *root, u64 bytenr);
int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr, u64 parent_transid);
void readahead_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr);
int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
int mirror_num, struct extent_buffer **eb);
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr);
struct extent_buffer *btrfs_find_create_tree_block(
struct btrfs_fs_info *fs_info,
u64 bytenr);
void clean_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, struct extent_buffer *buf);
int open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
void close_ctree(struct btrfs_root *root);
void close_ctree(struct btrfs_fs_info *fs_info);
int write_ctree_super(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int max_mirrors);
struct btrfs_fs_info *fs_info, int max_mirrors);
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
struct buffer_head **bh_ret);
int btrfs_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
u64 bytenr);
int btrfs_commit_super(struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
struct btrfs_key *location);
int btrfs_init_fs_root(struct btrfs_root *root);
@ -85,15 +84,14 @@ btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty(struct btrfs_root *root);
void btrfs_btree_balance_dirty_nodelay(struct btrfs_root *root);
void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info);
void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info);
void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_root(struct btrfs_root *root);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
u32 sectorsize, u32 nodesize);
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info);
#endif
/*
@ -121,7 +119,7 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
int atomic);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
void btrfs_csum_final(u32 crc, u8 *result);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
enum btrfs_wq_endio_type metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
@ -137,9 +135,9 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
struct btrfs_root *root);
struct btrfs_fs_info *fs_info);
struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
u64 objectid);

View file

@ -153,6 +153,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
static struct dentry *btrfs_get_parent(struct dentry *child)
{
struct inode *dir = d_inode(child);
struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_path *path;
struct extent_buffer *leaf;
@ -169,7 +170,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
key.objectid = root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = root->fs_info->tree_root;
root = fs_info->tree_root;
} else {
key.objectid = btrfs_ino(dir);
key.type = BTRFS_INODE_REF_KEY;
@ -205,13 +206,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
btrfs_free_path(path);
if (found_key.type == BTRFS_ROOT_BACKREF_KEY) {
return btrfs_get_dentry(root->fs_info->sb, key.objectid,
return btrfs_get_dentry(fs_info->sb, key.objectid,
found_key.offset, 0, 0);
}
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL));
fail:
btrfs_free_path(path);
return ERR_PTR(ret);
@ -222,6 +223,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
{
struct inode *inode = d_inode(child);
struct inode *dir = d_inode(parent);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_path *path;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_inode_ref *iref;
@ -250,7 +252,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
key.objectid = BTRFS_I(inode)->root->root_key.objectid;
key.type = BTRFS_ROOT_BACKREF_KEY;
key.offset = (u64)-1;
root = root->fs_info->tree_root;
root = fs_info->tree_root;
} else {
key.objectid = ino;
key.offset = btrfs_ino(dir);

The diff for this file is not shown because of its large size.

View file

@ -2029,7 +2029,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
* read repair operation.
*/
btrfs_bio_counter_inc_blocked(fs_info);
ret = btrfs_map_block(fs_info, WRITE, logical,
ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, logical,
&map_length, &bbio, mirror_num);
if (ret) {
btrfs_bio_counter_dec(fs_info);
@ -2067,20 +2067,20 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
return 0;
}
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
int mirror_num)
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int mirror_num)
{
u64 start = eb->start;
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
int ret = 0;
if (root->fs_info->sb->s_flags & MS_RDONLY)
if (fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
ret = repair_io_failure(root->fs_info->btree_inode, start,
ret = repair_io_failure(fs_info->btree_inode, start,
PAGE_SIZE, start, p,
start - page_offset(p), mirror_num);
if (ret)
@ -2341,6 +2341,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
struct page *page, int pg_offset, int icsum,
bio_end_io_t *endio_func, void *data)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio;
struct btrfs_io_bio *btrfs_failed_bio;
struct btrfs_io_bio *btrfs_bio;
@ -2351,13 +2352,12 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
bio->bi_end_io = endio_func;
bio->bi_iter.bi_sector = failrec->logical >> 9;
bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
bio->bi_bdev = fs_info->fs_devices->latest_bdev;
bio->bi_iter.bi_size = 0;
bio->bi_private = data;
btrfs_failed_bio = btrfs_io_bio(failed_bio);
if (btrfs_failed_bio->csum) {
struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
btrfs_bio = btrfs_io_bio(bio);
@ -2474,6 +2474,8 @@ static void end_bio_extent_writepage(struct bio *bio)
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
struct inode *inode = page->mapping->host;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
/* We always issue full-page reads, but if some block
* in a page fails to read, blk_update_request() will
@ -2482,11 +2484,11 @@ static void end_bio_extent_writepage(struct bio *bio)
* if they don't add up to a full page. */
if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) {
if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE)
btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info,
btrfs_err(fs_info,
"partial page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
else
btrfs_info(BTRFS_I(page->mapping->host)->root->fs_info,
btrfs_info(fs_info,
"incomplete page write in btrfs with offset %u and length %u",
bvec->bv_offset, bvec->bv_len);
}
@ -3741,16 +3743,15 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
if (btrfs_header_level(eb) > 0) {
end = btrfs_node_key_ptr_offset(nritems);
memset_extent_buffer(eb, 0, end, eb->len - end);
memzero_extent_buffer(eb, end, eb->len - end);
} else {
/*
* leaf:
* header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
*/
start = btrfs_item_nr_offset(nritems);
end = btrfs_leaf_data(eb) +
leaf_data_end(fs_info->tree_root, eb);
memset_extent_buffer(eb, 0, start, end - start);
end = btrfs_leaf_data(eb) + leaf_data_end(fs_info, eb);
memzero_extent_buffer(eb, start, end - start);
}
for (i = 0; i < num_pages; i++) {
@ -4341,7 +4342,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
u64 last,
get_extent_t *get_extent)
{
u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
u64 sectorsize = btrfs_inode_sectorsize(inode);
struct extent_map *em;
u64 len;
@ -4402,8 +4403,8 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -ENOMEM;
path->leave_spinning = 1;
start = round_down(start, BTRFS_I(inode)->root->sectorsize);
len = round_up(max, BTRFS_I(inode)->root->sectorsize) - start;
start = round_down(start, btrfs_inode_sectorsize(inode));
len = round_up(max, btrfs_inode_sectorsize(inode)) - start;
/*
* lookup the last file extent. We're not using i_size here
@ -4537,7 +4538,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
root->objectid,
btrfs_ino(inode), bytenr);
if (trans)
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans);
if (ret < 0)
goto out_free;
if (ret)
@ -4718,9 +4719,9 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
WARN_ON(PageDirty(p));
SetPageUptodate(p);
new->pages[i] = p;
copy_page(page_address(p), page_address(src->pages[i]));
}
copy_extent_buffer(new, src, 0, 0, src->len);
set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
@ -4758,21 +4759,9 @@ err:
}
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u32 nodesize)
u64 start)
{
unsigned long len;
if (!fs_info) {
/*
* Called only from tests that don't always have a fs_info
* available
*/
len = nodesize;
} else {
len = fs_info->tree_root->nodesize;
}
return __alloc_dummy_extent_buffer(fs_info, start, len);
return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize);
}
static void check_buffer_tree_ref(struct extent_buffer *eb)
@ -4863,7 +4852,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u32 nodesize)
u64 start)
{
struct extent_buffer *eb, *exists = NULL;
int ret;
@ -4871,7 +4860,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
eb = find_extent_buffer(fs_info, start);
if (eb)
return eb;
eb = alloc_dummy_extent_buffer(fs_info, start, nodesize);
eb = alloc_dummy_extent_buffer(fs_info, start);
if (!eb)
return NULL;
eb->fs_info = fs_info;
@ -4911,7 +4900,7 @@ free_eb:
struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start)
{
unsigned long len = fs_info->tree_root->nodesize;
unsigned long len = fs_info->nodesize;
unsigned long num_pages = num_extent_pages(start, len);
unsigned long i;
unsigned long index = start >> PAGE_SHIFT;
@ -4922,7 +4911,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
int uptodate = 1;
int ret;
if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
if (!IS_ALIGNED(start, fs_info->sectorsize)) {
btrfs_err(fs_info, "bad tree block start %llu", start);
return ERR_PTR(-EINVAL);
}
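
Another recurring cleanup visible here: nodesize and sectorsize are now per-filesystem fields on fs_info rather than hanging off each root, with btrfs_inode_sectorsize() added as a convenience for inode-based code (it appears in the fiemap hunk above). Sketch of the access change:

	/* before: sizes lived on a root, forcing long dereference chains */
	u32 nodesize = fs_info->tree_root->nodesize;
	u32 sectorsize = BTRFS_I(inode)->root->sectorsize;

	/* after: fs_info owns them directly */
	u32 nodesize = fs_info->nodesize;
	u32 sectorsize = btrfs_inode_sectorsize(inode);
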
@ -5463,6 +5452,27 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
return ret;
}
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
const void *srcv)
{
char *kaddr;
WARN_ON(!PageUptodate(eb->pages[0]));
kaddr = page_address(eb->pages[0]);
memcpy(kaddr + offsetof(struct btrfs_header, chunk_tree_uuid), srcv,
BTRFS_FSID_SIZE);
}
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *srcv)
{
char *kaddr;
WARN_ON(!PageUptodate(eb->pages[0]));
kaddr = page_address(eb->pages[0]);
memcpy(kaddr + offsetof(struct btrfs_header, fsid), srcv,
BTRFS_FSID_SIZE);
}
void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
unsigned long start, unsigned long len)
{
@ -5494,8 +5504,8 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
}
}
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len)
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
unsigned long len)
{
size_t cur;
size_t offset;
@ -5515,7 +5525,7 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
cur = min(len, PAGE_SIZE - offset);
kaddr = page_address(page);
memset(kaddr + offset, c, cur);
memset(kaddr + offset, 0, cur);
len -= cur;
offset = 0;
@ -5523,6 +5533,20 @@ void memset_extent_buffer(struct extent_buffer *eb, char c,
}
}
void copy_extent_buffer_full(struct extent_buffer *dst,
struct extent_buffer *src)
{
int i;
unsigned num_pages;
ASSERT(dst->len == src->len);
num_pages = num_extent_pages(dst->start, dst->len);
for (i = 0; i < num_pages; i++)
copy_page(page_address(dst->pages[i]),
page_address(src->pages[i]));
}
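
The new helpers above (write_extent_buffer_fsid, write_extent_buffer_chunk_tree_uuid, memzero_extent_buffer, copy_extent_buffer_full) replace open-coded memcpy/memset against btrfs_header offsets. A sketch of stamping a freshly allocated tree block's header with them — roughly what the allocation path elsewhere in this series does; the exact call site is not in the hunks shown, and fs_info->fsid / fs_info->chunk_tree_uuid are assumed to be the 4.10-era field names:

	/* sketch: initialize a new tree block header via the new helpers */
	static void init_new_eb_header(struct btrfs_fs_info *fs_info,
				       struct extent_buffer *buf)
	{
		memzero_extent_buffer(buf, 0, sizeof(struct btrfs_header));
		write_extent_buffer_fsid(buf, fs_info->fsid);
		write_extent_buffer_chunk_tree_uuid(buf, fs_info->chunk_tree_uuid);
	}
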
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len)
@ -5764,6 +5788,7 @@ static void copy_pages(struct page *dst_page, struct page *src_page,
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len)
{
struct btrfs_fs_info *fs_info = dst->fs_info;
size_t cur;
size_t dst_off_in_page;
size_t src_off_in_page;
@ -5772,13 +5797,13 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i;
if (src_offset + len > dst->len) {
btrfs_err(dst->fs_info,
btrfs_err(fs_info,
"memmove bogus src_offset %lu move len %lu dst len %lu",
src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
btrfs_err(dst->fs_info,
btrfs_err(fs_info,
"memmove bogus dst_offset %lu move len %lu dst len %lu",
dst_offset, len, dst->len);
BUG_ON(1);
@ -5810,6 +5835,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len)
{
struct btrfs_fs_info *fs_info = dst->fs_info;
size_t cur;
size_t dst_off_in_page;
size_t src_off_in_page;
@ -5820,13 +5846,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_i;
if (src_offset + len > dst->len) {
btrfs_err(dst->fs_info,
btrfs_err(fs_info,
"memmove bogus src_offset %lu move len %lu len %lu",
src_offset, len, dst->len);
BUG_ON(1);
}
if (dst_offset + len > dst->len) {
btrfs_err(dst->fs_info,
btrfs_err(fs_info,
"memmove bogus dst_offset %lu move len %lu len %lu",
dst_offset, len, dst->len);
BUG_ON(1);

View file

@ -371,7 +371,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, unsigned long len);
struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u32 nodesize);
u64 start);
struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start);
@ -405,8 +405,13 @@ void read_extent_buffer(struct extent_buffer *eb, void *dst,
int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
unsigned long start,
unsigned long len);
void write_extent_buffer_fsid(struct extent_buffer *eb, const void *src);
void write_extent_buffer_chunk_tree_uuid(struct extent_buffer *eb,
const void *src);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
unsigned long start, unsigned long len);
void copy_extent_buffer_full(struct extent_buffer *dst,
struct extent_buffer *src);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len);
@ -414,8 +419,8 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len);
void memzero_extent_buffer(struct extent_buffer *eb, unsigned long start,
unsigned long len);
int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
unsigned long pos);
void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
@ -452,8 +457,8 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
int clean_io_failure(struct inode *inode, u64 start, struct page *page,
unsigned int pg_offset);
void end_extent_writepage(struct page *page, int err, u64 start, u64 end);
int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
int mirror_num);
int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, int mirror_num);
/*
* When IO fails, either with EIO or csum verification fails, we
@ -491,5 +496,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
u64 *end, u64 max_bytes);
#endif
struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
u64 start, u32 nodesize);
u64 start);
#endif

View file

@ -34,9 +34,9 @@
#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
PAGE_SIZE))
#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
sizeof(struct btrfs_ordered_sum)) / \
sizeof(u32) * (r)->sectorsize)
sizeof(u32) * (fs_info)->sectorsize)
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
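
For scale: MAX_ORDERED_SUM_BYTES bounds how much file data one btrfs_ordered_sum allocation can describe, since the struct plus its csum array must fit in a single page. As a rough worked example, assuming 4 KiB pages, 4-byte crc32c entries, and a struct header of a few tens of bytes: (4096 − ~32) / 4 ≈ 1000 checksums, covering about 4 MiB of data per allocation at a 4 KiB sectorsize (figures approximate).
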
@ -90,13 +90,14 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
struct btrfs_path *path,
u64 bytenr, int cow)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_key file_key;
struct btrfs_key found_key;
struct btrfs_csum_item *item;
struct extent_buffer *leaf;
u64 csum_offset = 0;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
int csums_in_item;
file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@ -116,7 +117,7 @@ btrfs_lookup_csum(struct btrfs_trans_handle *trans,
goto fail;
csum_offset = (bytenr - found_key.offset) >>
root->fs_info->sb->s_blocksize_bits;
fs_info->sb->s_blocksize_bits;
csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
csums_in_item /= csum_size;
@ -159,11 +160,11 @@ static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
kfree(bio->csum_allocated);
}
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
struct inode *inode, struct bio *bio,
static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
u64 logical_offset, u32 *dst, int dio)
{
struct bio_vec *bvec = bio->bi_io_vec;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio_vec *bvec;
struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
struct btrfs_csum_item *item = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@ -176,9 +177,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
u64 page_bytes_left;
u32 diff;
int nblocks;
int bio_index = 0;
int count;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
int count = 0, i;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
path = btrfs_alloc_path();
if (!path)
@ -223,8 +223,11 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (dio)
offset = logical_offset;
page_bytes_left = bvec->bv_len;
while (bio_index < bio->bi_vcnt) {
bio_for_each_segment_all(bvec, bio, i) {
page_bytes_left = bvec->bv_len;
if (count)
goto next;
if (!dio)
offset = page_offset(bvec->bv_page) + bvec->bv_offset;
count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
@ -239,7 +242,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (item)
btrfs_release_path(path);
item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
item = btrfs_lookup_csum(NULL, fs_info->csum_root,
path, disk_bytenr, 0);
if (IS_ERR(item)) {
count = 1;
@ -247,10 +250,10 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
if (BTRFS_I(inode)->root->root_key.objectid ==
BTRFS_DATA_RELOC_TREE_OBJECTID) {
set_extent_bits(io_tree, offset,
offset + root->sectorsize - 1,
offset + fs_info->sectorsize - 1,
EXTENT_NODATASUM);
} else {
btrfs_info_rl(BTRFS_I(inode)->root->fs_info,
btrfs_info_rl(fs_info,
"no csum found for inode %llu start %llu",
btrfs_ino(inode), offset);
}
@ -266,7 +269,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
path->slots[0]);
item_last_offset = item_start_offset +
(item_size / csum_size) *
root->sectorsize;
fs_info->sectorsize;
item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_csum_item);
}
@ -275,7 +278,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
* a single leaf so it will also fit inside a u32
*/
diff = disk_bytenr - item_start_offset;
diff = diff / root->sectorsize;
diff = diff / fs_info->sectorsize;
diff = diff * csum_size;
count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
inode->i_sb->s_blocksize_bits);
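
The diff arithmetic above locates one sector's checksum inside a csum item. A worked example, assuming a 4 KiB sectorsize and 4-byte crc32c entries: a disk_bytenr 32 KiB past item_start_offset lands at (32768 / 4096) * 4 = 32 bytes into the item's checksum array. As a standalone sketch:

	/* sketch: byte offset of disk_bytenr's csum within its csum item */
	static u32 csum_offset_in_item(u64 disk_bytenr, u64 item_start_offset,
				       u32 sectorsize, u16 csum_size)
	{
		return ((disk_bytenr - item_start_offset) / sectorsize) *
			csum_size;
	}
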
@ -285,48 +288,35 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
found:
csum += count * csum_size;
nblocks -= count;
next:
while (count--) {
disk_bytenr += root->sectorsize;
offset += root->sectorsize;
page_bytes_left -= root->sectorsize;
if (!page_bytes_left) {
bio_index++;
/*
* make sure we're still inside the
* bio before we update page_bytes_left
*/
if (bio_index >= bio->bi_vcnt) {
WARN_ON_ONCE(count);
goto done;
}
bvec++;
page_bytes_left = bvec->bv_len;
}
disk_bytenr += fs_info->sectorsize;
offset += fs_info->sectorsize;
page_bytes_left -= fs_info->sectorsize;
if (!page_bytes_left)
break; /* move to next bio */
}
}
done:
WARN_ON_ONCE(count);
btrfs_free_path(path);
return 0;
}
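
The loop rework above is part of the series-wide move to proper bio-walking helpers: instead of indexing bi_io_vec and advancing bvec by hand, the code now uses the block layer's iterator. The general shape, for reference (a sketch using the 4.10-era three-argument form of the macro; process_segment() is a hypothetical placeholder, not a btrfs function):

	/* sketch: walking every segment of a completed bio */
	static void walk_bio_segments(struct bio *bio)
	{
		struct bio_vec *bvec;
		int i;

		bio_for_each_segment_all(bvec, bio, i) {
			/* each bvec is one segment: page + offset + length;
			 * no manual bio_index / bvec++ bookkeeping needed */
			process_segment(bvec->bv_page, bvec->bv_offset,
					bvec->bv_len);
		}
	}
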
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u32 *dst)
int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 offset)
int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct list_head *list, int search_commit)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_path *path;
struct extent_buffer *leaf;
@ -337,10 +327,10 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
int ret;
size_t size;
u64 csum_end;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
ASSERT(IS_ALIGNED(start, root->sectorsize) &&
IS_ALIGNED(end + 1, root->sectorsize));
ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
IS_ALIGNED(end + 1, fs_info->sectorsize));
path = btrfs_alloc_path();
if (!path)
@ -365,7 +355,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
key.type == BTRFS_EXTENT_CSUM_KEY) {
offset = (start - key.offset) >>
root->fs_info->sb->s_blocksize_bits;
fs_info->sb->s_blocksize_bits;
if (offset * csum_size <
btrfs_item_size_nr(leaf, path->slots[0] - 1))
path->slots[0]--;
@ -393,7 +383,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
start = key.offset;
size = btrfs_item_size_nr(leaf, path->slots[0]);
csum_end = key.offset + (size / csum_size) * root->sectorsize;
csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
if (csum_end <= start) {
path->slots[0]++;
continue;
@ -404,8 +394,8 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
struct btrfs_csum_item);
while (start < csum_end) {
size = min_t(size_t, csum_end - start,
MAX_ORDERED_SUM_BYTES(root));
sums = kzalloc(btrfs_ordered_sum_size(root, size),
MAX_ORDERED_SUM_BYTES(fs_info));
sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
GFP_NOFS);
if (!sums) {
ret = -ENOMEM;
@ -416,16 +406,16 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
sums->len = (int)size;
offset = (start - key.offset) >>
root->fs_info->sb->s_blocksize_bits;
fs_info->sb->s_blocksize_bits;
offset *= csum_size;
size >>= root->fs_info->sb->s_blocksize_bits;
size >>= fs_info->sb->s_blocksize_bits;
read_extent_buffer(path->nodes[0],
sums->sums,
((unsigned long)item) + offset,
csum_size * size);
start += root->sectorsize * size;
start += fs_info->sectorsize * size;
list_add_tail(&sums->list, &tmplist);
}
path->slots[0]++;
@ -443,23 +433,23 @@ fail:
return ret;
}
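The offset arithmetic above recurs throughout this file, so a small standalone sketch may help: a checksum item's key.offset names the first disk byte the item covers, and a target byte is turned into a checksum index by shifting with the block size bits, then into a byte position within the item by multiplying with the checksum width. The 4 KiB block and the 4-byte CRC32C width are assumptions for illustration.

#include <stdio.h>

int main(void)
{
        unsigned long long key_offset = 1048576;  /* item covers from here */
        unsigned long long start = 1069056;       /* lookup target */
        int blocksize_bits = 12;                  /* 4096-byte sectors */
        unsigned int csum_size = 4;               /* CRC32C */

        unsigned long long index = (start - key_offset) >> blocksize_bits;
        unsigned long long byte_off = index * csum_size;

        printf("csum #%llu, byte offset %llu in item\n", index, byte_off);
        return 0;
}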
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig)
int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
u64 file_start, int contig)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_sum *sums;
struct btrfs_ordered_extent *ordered;
struct btrfs_ordered_extent *ordered = NULL;
char *data;
struct bio_vec *bvec = bio->bi_io_vec;
int bio_index = 0;
struct bio_vec *bvec;
int index;
int nr_sectors;
int i;
int i, j;
unsigned long total_bytes = 0;
unsigned long this_sum_bytes = 0;
u64 offset;
WARN_ON(bio->bi_vcnt <= 0);
sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
GFP_NOFS);
if (!sums)
return -ENOMEM;
@ -470,22 +460,25 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
if (contig)
offset = file_start;
else
offset = page_offset(bvec->bv_page) + bvec->bv_offset;
offset = 0; /* shut up gcc */
ordered = btrfs_lookup_ordered_extent(inode, offset);
BUG_ON(!ordered); /* Logic error */
sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
index = 0;
while (bio_index < bio->bi_vcnt) {
bio_for_each_segment_all(bvec, bio, j) {
if (!contig)
offset = page_offset(bvec->bv_page) + bvec->bv_offset;
if (!ordered) {
ordered = btrfs_lookup_ordered_extent(inode, offset);
BUG_ON(!ordered); /* Logic error */
}
data = kmap_atomic(bvec->bv_page);
nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
bvec->bv_len + root->sectorsize
- 1);
nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
bvec->bv_len + fs_info->sectorsize
- 1);
for (i = 0; i < nr_sectors; i++) {
if (offset >= ordered->file_offset + ordered->len ||
@ -500,8 +493,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
bytes_left = bio->bi_iter.bi_size - total_bytes;
sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
GFP_NOFS);
sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
GFP_NOFS);
BUG_ON(!sums); /* -ENOMEM */
sums->len = bytes_left;
ordered = btrfs_lookup_ordered_extent(inode,
@ -517,21 +510,18 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
sums->sums[index] = ~(u32)0;
sums->sums[index]
= btrfs_csum_data(data + bvec->bv_offset
+ (i * root->sectorsize),
+ (i * fs_info->sectorsize),
sums->sums[index],
root->sectorsize);
fs_info->sectorsize);
btrfs_csum_final(sums->sums[index],
(char *)(sums->sums + index));
index++;
offset += root->sectorsize;
this_sum_bytes += root->sectorsize;
total_bytes += root->sectorsize;
offset += fs_info->sectorsize;
this_sum_bytes += fs_info->sectorsize;
total_bytes += fs_info->sectorsize;
}
kunmap_atomic(data);
bio_index++;
bvec++;
}
this_sum_bytes = 0;
btrfs_add_ordered_sum(inode, ordered, sums);
@ -550,20 +540,20 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
* This calls btrfs_truncate_item with the correct args based on the
* overlap, and fixes up the key as required.
*/
static noinline void truncate_one_csum(struct btrfs_root *root,
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
struct btrfs_path *path,
struct btrfs_key *key,
u64 bytenr, u64 len)
{
struct extent_buffer *leaf;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
u64 csum_end;
u64 end_byte = bytenr + len;
u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
u32 blocksize_bits = fs_info->sb->s_blocksize_bits;
leaf = path->nodes[0];
csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
csum_end <<= root->fs_info->sb->s_blocksize_bits;
csum_end <<= fs_info->sb->s_blocksize_bits;
csum_end += key->offset;
if (key->offset < bytenr && csum_end <= end_byte) {
@ -575,7 +565,7 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
*/
u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size;
btrfs_truncate_item(root, path, new_size, 1);
btrfs_truncate_item(fs_info, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) {
/*
@ -587,10 +577,10 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size;
btrfs_truncate_item(root, path, new_size, 0);
btrfs_truncate_item(fs_info, path, new_size, 0);
key->offset = end_byte;
btrfs_set_item_key_safe(root->fs_info, path, key);
btrfs_set_item_key_safe(fs_info, path, key);
} else {
BUG();
}
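A hedged sketch of the two legal cases above, with invented numbers: either the deleted range covers the tail of the item (shrink from the right) or its head (shrink from the left and slide the key forward to end_byte); anything else hits the BUG().

#include <stdio.h>

int main(void)
{
        unsigned long long key_offset = 0, csum_end = 65536;
        unsigned long long bytenr = 49152, end_byte = 81920;
        int blocksize_bits = 12;
        unsigned int csum_size = 4, new_size;

        if (key_offset < bytenr && csum_end <= end_byte) {
                /* keep [key_offset, bytenr): truncate from the right */
                new_size = ((bytenr - key_offset) >> blocksize_bits) * csum_size;
                printf("truncate tail, new item size %u bytes\n", new_size);
        } else if (key_offset >= bytenr && csum_end > end_byte) {
                /* keep [end_byte, csum_end): truncate from the left,
                 * and the item key would move forward to end_byte */
                new_size = ((csum_end - end_byte) >> blocksize_bits) * csum_size;
                printf("truncate head, new item size %u bytes\n", new_size);
        }
        return 0;
}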
@ -601,18 +591,17 @@ static noinline void truncate_one_csum(struct btrfs_root *root,
* range of bytes.
*/
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 len)
struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
struct btrfs_root *root = fs_info->csum_root;
struct btrfs_path *path;
struct btrfs_key key;
u64 end_byte = bytenr + len;
u64 csum_end;
struct extent_buffer *leaf;
int ret;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
root = root->fs_info->csum_root;
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
int blocksize_bits = fs_info->sb->s_blocksize_bits;
path = btrfs_alloc_path();
if (!path)
@ -689,7 +678,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
item_offset = btrfs_item_ptr_offset(leaf,
path->slots[0]);
memset_extent_buffer(leaf, 0, item_offset + offset,
memzero_extent_buffer(leaf, item_offset + offset,
shift_len);
key.offset = bytenr;
@ -705,7 +694,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
key.offset = end_byte - 1;
} else {
truncate_one_csum(root, path, &key, bytenr, len);
truncate_one_csum(fs_info, path, &key, bytenr, len);
if (key.offset < bytenr)
break;
}
@ -721,6 +710,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key file_key;
struct btrfs_key found_key;
struct btrfs_path *path;
@ -736,7 +726,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
int index = 0;
int found_next;
int ret;
u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
path = btrfs_alloc_path();
if (!path)
@ -769,7 +759,7 @@ again:
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
if ((item_size / csum_size) >=
MAX_CSUM_ITEMS(root, csum_size)) {
MAX_CSUM_ITEMS(fs_info, csum_size)) {
/* already at max size, make a new one */
goto insert;
}
@ -815,11 +805,11 @@ again:
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
csum_offset = (bytenr - found_key.offset) >>
root->fs_info->sb->s_blocksize_bits;
fs_info->sb->s_blocksize_bits;
if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
goto insert;
}
@ -830,26 +820,27 @@ again:
u32 diff;
u32 free_space;
if (btrfs_leaf_free_space(root, leaf) <
if (btrfs_leaf_free_space(fs_info, leaf) <
sizeof(struct btrfs_item) + csum_size * 2)
goto insert;
free_space = btrfs_leaf_free_space(root, leaf) -
free_space = btrfs_leaf_free_space(fs_info, leaf) -
sizeof(struct btrfs_item) - csum_size;
tmp = sums->len - total_bytes;
tmp >>= root->fs_info->sb->s_blocksize_bits;
tmp >>= fs_info->sb->s_blocksize_bits;
WARN_ON(tmp < 1);
extend_nr = max_t(int, 1, (int)tmp);
diff = (csum_offset + extend_nr) * csum_size;
diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);
diff = min(diff,
MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);
diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
diff = min(free_space, diff);
diff /= csum_size;
diff *= csum_size;
btrfs_extend_item(root, path, diff);
btrfs_extend_item(fs_info, path, diff);
ret = 0;
goto csum;
}
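The clamping chain in this hunk is easy to misread, so here is a standalone sketch with made-up sizes: the wanted growth is capped by the per-item ceiling (the MAX_CSUM_ITEMS stand-in), reduced by what the item already holds, capped again by the leaf's free space, and finally rounded down to a whole number of checksums.

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int csum_size = 4, csum_offset = 100, extend_nr = 500;
        unsigned int max_item_csums = 1024;   /* MAX_CSUM_ITEMS stand-in */
        unsigned int item_size = 400;         /* current item payload */
        unsigned int free_space = 900;        /* leaf free space budget */

        unsigned int diff = (csum_offset + extend_nr) * csum_size;

        diff = min_u32(diff, max_item_csums * csum_size);
        diff = diff - item_size;
        diff = min_u32(free_space, diff);
        diff = (diff / csum_size) * csum_size; /* whole checksums only */

        printf("extend item by %u bytes (%u csums)\n", diff, diff / csum_size);
        return 0;
}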
@ -861,12 +852,12 @@ insert:
u64 tmp;
tmp = sums->len - total_bytes;
tmp >>= root->fs_info->sb->s_blocksize_bits;
tmp >>= fs_info->sb->s_blocksize_bits;
tmp = min(tmp, (next_offset - file_key.offset) >>
root->fs_info->sb->s_blocksize_bits);
fs_info->sb->s_blocksize_bits);
tmp = max((u64)1, tmp);
tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
tmp = min(tmp, (u64)MAX_CSUM_ITEMS(fs_info, csum_size));
ins_size = csum_size * tmp;
} else {
ins_size = csum_size;
@ -888,7 +879,7 @@ csum:
csum_offset * csum_size);
found:
ins_size = (u32)(sums->len - total_bytes) >>
root->fs_info->sb->s_blocksize_bits;
fs_info->sb->s_blocksize_bits;
ins_size *= csum_size;
ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
ins_size);
@ -896,7 +887,7 @@ found:
ins_size);
ins_size /= csum_size;
total_bytes += ins_size * root->sectorsize;
total_bytes += ins_size * fs_info->sectorsize;
index += ins_size;
btrfs_mark_buffer_dirty(path->nodes[0]);
@ -919,6 +910,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
const bool new_inline,
struct extent_map *em)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf = path->nodes[0];
const int slot = path->slots[0];
@ -928,7 +920,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
u8 type = btrfs_file_extent_type(leaf, fi);
int compress_type = btrfs_file_extent_compression(leaf, fi);
em->bdev = root->fs_info->fs_devices->latest_bdev;
em->bdev = fs_info->fs_devices->latest_bdev;
btrfs_item_key_to_cpu(leaf, &key, slot);
extent_start = key.offset;
@ -939,7 +931,8 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
} else if (type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, slot, fi);
extent_end = ALIGN(extent_start + size, root->sectorsize);
extent_end = ALIGN(extent_start + size,
fs_info->sectorsize);
}
em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
@ -982,7 +975,7 @@ void btrfs_extent_item_to_extent_map(struct inode *inode,
em->compress_type = compress_type;
}
} else {
btrfs_err(root->fs_info,
btrfs_err(fs_info,
"unknown file extent item type %d, inode %llu, offset %llu, root %llu",
type, btrfs_ino(inode), extent_start,
root->root_key.objectid);

View file

@ -27,7 +27,6 @@
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
@ -96,13 +95,13 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1,
static int __btrfs_add_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct inode_defrag *entry;
struct rb_node **p;
struct rb_node *parent = NULL;
int ret;
p = &root->fs_info->defrag_inodes.rb_node;
p = &fs_info->defrag_inodes.rb_node;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct inode_defrag, rb_node);
@ -126,16 +125,16 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
}
set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
rb_link_node(&defrag->rb_node, parent, p);
rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes);
return 0;
}
static inline int __need_auto_defrag(struct btrfs_root *root)
static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info)
{
if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG))
if (!btrfs_test_opt(fs_info, AUTO_DEFRAG))
return 0;
if (btrfs_fs_closing(root->fs_info))
if (btrfs_fs_closing(fs_info))
return 0;
return 1;
@ -148,12 +147,13 @@ static inline int __need_auto_defrag(struct btrfs_root *root)
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct inode *inode)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct inode_defrag *defrag;
u64 transid;
int ret;
if (!__need_auto_defrag(root))
if (!__need_auto_defrag(fs_info))
return 0;
if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
@ -172,7 +172,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
defrag->transid = transid;
defrag->root = root->root_key.objectid;
spin_lock(&root->fs_info->defrag_inodes_lock);
spin_lock(&fs_info->defrag_inodes_lock);
if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
/*
* If we set IN_DEFRAG flag and evict the inode from memory,
@ -185,7 +185,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
} else {
kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
spin_unlock(&root->fs_info->defrag_inodes_lock);
spin_unlock(&fs_info->defrag_inodes_lock);
return 0;
}
@ -197,19 +197,19 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
static void btrfs_requeue_inode_defrag(struct inode *inode,
struct inode_defrag *defrag)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
if (!__need_auto_defrag(root))
if (!__need_auto_defrag(fs_info))
goto out;
/*
* Here we don't check the IN_DEFRAG flag, because we need to merge
* them together.
*/
spin_lock(&root->fs_info->defrag_inodes_lock);
spin_lock(&fs_info->defrag_inodes_lock);
ret = __btrfs_add_inode_defrag(inode, defrag);
spin_unlock(&root->fs_info->defrag_inodes_lock);
spin_unlock(&fs_info->defrag_inodes_lock);
if (ret)
goto out;
return;
@ -373,7 +373,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
&fs_info->fs_state))
break;
if (!__need_auto_defrag(fs_info->tree_root))
if (!__need_auto_defrag(fs_info))
break;
/* find an inode to defrag */
@ -485,11 +485,11 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
* this also makes the decision about creating an inline extent vs
* doing real data extents, marking pages dirty and delalloc as required.
*/
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
struct page **pages, size_t num_pages,
loff_t pos, size_t write_bytes,
struct extent_state **cached)
int btrfs_dirty_pages(struct inode *inode, struct page **pages,
size_t num_pages, loff_t pos, size_t write_bytes,
struct extent_state **cached)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int err = 0;
int i;
u64 num_bytes;
@ -498,8 +498,9 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
u64 end_pos = pos + write_bytes;
loff_t isize = i_size_read(inode);
start_pos = pos & ~((u64)root->sectorsize - 1);
num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
start_pos = pos & ~((u64) fs_info->sectorsize - 1);
num_bytes = round_up(write_bytes + pos - start_pos,
fs_info->sectorsize);
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
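Since this hunk is mostly about replacing root->sectorsize with fs_info->sectorsize in alignment math, a tiny userspace sketch of that math (with an assumed 4096-byte sector and sample offsets) shows what start_pos, num_bytes and end_of_last_block come out to:

#include <stdio.h>

int main(void)
{
        unsigned long long sectorsize = 4096;
        unsigned long long pos = 5000, write_bytes = 3000;

        unsigned long long start_pos = pos & ~(sectorsize - 1);
        unsigned long long num_bytes = ((write_bytes + pos - start_pos) +
                                        sectorsize - 1) / sectorsize * sectorsize;
        unsigned long long end_of_last_block = start_pos + num_bytes - 1;

        printf("start %llu, len %llu, last byte %llu\n",
               start_pos, num_bytes, end_of_last_block);
        return 0;
}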
@ -696,6 +697,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
u32 extent_item_size,
int *key_inserted)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
@ -706,6 +708,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
u64 num_bytes = 0;
u64 extent_offset = 0;
u64 extent_end = 0;
u64 last_end = start;
int del_nr = 0;
int del_slot = 0;
int extent_type;
@ -723,7 +726,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
modify_tree = 0;
update_refs = (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
root == root->fs_info->tree_root);
root == fs_info->tree_root);
while (1) {
recow = 0;
ret = btrfs_lookup_file_extent(trans, root, path, ino,
@ -797,8 +800,10 @@ next_slot:
* extent item in the call to setup_items_for_insert() later
* in this function.
*/
if (extent_end == key.offset && extent_end >= search_start)
if (extent_end == key.offset && extent_end >= search_start) {
last_end = extent_end;
goto delete_extent_item;
}
if (extent_end <= search_start) {
path->slots[0]++;
@ -851,7 +856,7 @@ next_slot:
btrfs_mark_buffer_dirty(leaf);
if (update_refs && disk_bytenr > 0) {
ret = btrfs_inc_extent_ref(trans, root,
ret = btrfs_inc_extent_ref(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
new_key.objectid,
@ -860,6 +865,12 @@ next_slot:
}
key.offset = start;
}
/*
* From here on out we will have actually dropped something, so
* last_end can be updated.
*/
last_end = extent_end;
/*
* | ---- range to drop ----- |
* | -------- extent -------- |
@ -872,7 +883,7 @@ next_slot:
memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = end;
btrfs_set_item_key_safe(root->fs_info, path, &new_key);
btrfs_set_item_key_safe(fs_info, path, &new_key);
extent_offset += end - key.offset;
btrfs_set_file_extent_offset(leaf, fi, extent_offset);
@ -927,9 +938,9 @@ delete_extent_item:
inode_sub_bytes(inode,
extent_end - key.offset);
extent_end = ALIGN(extent_end,
root->sectorsize);
fs_info->sectorsize);
} else if (update_refs && disk_bytenr > 0) {
ret = btrfs_free_extent(trans, root,
ret = btrfs_free_extent(trans, fs_info,
disk_bytenr, num_bytes, 0,
root->root_key.objectid,
key.objectid, key.offset -
@ -986,7 +997,7 @@ delete_extent_item:
if (!ret && replace_extent && leafs_visited == 1 &&
(path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
path->locks[0] == BTRFS_WRITE_LOCK) &&
btrfs_leaf_free_space(root, leaf) >=
btrfs_leaf_free_space(fs_info, leaf) >=
sizeof(struct btrfs_item) + extent_item_size) {
key.objectid = ino;
@ -1010,7 +1021,7 @@ delete_extent_item:
if (!replace_extent || !(*key_inserted))
btrfs_release_path(path);
if (drop_end)
*drop_end = found ? min(end, extent_end) : end;
*drop_end = found ? min(end, last_end) : end;
return ret;
}
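The last_end change in this function is subtle, so a simplified sketch may help: last_end starts at the requested start and only advances when an extent is actually dropped, so the reported drop_end can no longer point past data that was never touched. The extent list and offsets below are invented sample data, not the on-disk format.

#include <stdio.h>

struct ext { unsigned long long start, end; };

int main(void)
{
        struct ext extents[] = { { 0, 4096 }, { 4096, 16384 } };
        unsigned long long start = 4096, end = 65536;
        unsigned long long last_end = start;   /* track real progress */
        unsigned long long drop_end;
        int i;

        for (i = 0; i < 2; i++) {
                if (extents[i].end <= start)
                        continue;
                /* the extent overlaps: it would be dropped/trimmed here */
                last_end = extents[i].end;
        }

        /* report only what was actually covered */
        drop_end = last_end < end ? last_end : end;
        printf("dropped up to %llu (requested %llu)\n", drop_end, end);
        return 0;
}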
@ -1073,6 +1084,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_path *path;
@ -1142,7 +1154,7 @@ again:
ino, bytenr, orig_offset,
&other_start, &other_end)) {
new_key.offset = end;
btrfs_set_item_key_safe(root->fs_info, path, &new_key);
btrfs_set_item_key_safe(fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
btrfs_set_file_extent_generation(leaf, fi,
@ -1176,7 +1188,7 @@ again:
trans->transid);
path->slots[0]++;
new_key.offset = start;
btrfs_set_item_key_safe(root->fs_info, path, &new_key);
btrfs_set_item_key_safe(fs_info, path, &new_key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
@ -1222,8 +1234,8 @@ again:
extent_end - split);
btrfs_mark_buffer_dirty(leaf);
ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
root->root_key.objectid,
ret = btrfs_inc_extent_ref(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
btrfs_abort_transaction(trans, ret);
@ -1256,7 +1268,7 @@ again:
extent_end = other_end;
del_slot = path->slots[0] + 1;
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@ -1276,7 +1288,7 @@ again:
key.offset = other_start;
del_slot = path->slots[0];
del_nr++;
ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
0, root->root_key.objectid,
ino, orig_offset);
if (ret) {
@ -1409,15 +1421,16 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
u64 *lockstart, u64 *lockend,
struct extent_state **cached_state)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
u64 start_pos;
u64 last_pos;
int i;
int ret = 0;
start_pos = round_down(pos, root->sectorsize);
start_pos = round_down(pos, fs_info->sectorsize);
last_pos = start_pos
+ round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
+ round_up(pos + write_bytes - start_pos,
fs_info->sectorsize) - 1;
if (start_pos < inode->i_size) {
struct btrfs_ordered_extent *ordered;
@ -1464,6 +1477,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
static noinline int check_can_nocow(struct inode *inode, loff_t pos,
size_t *write_bytes)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_extent *ordered;
u64 lockstart, lockend;
@ -1474,8 +1488,9 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
if (!ret)
return -ENOSPC;
lockstart = round_down(pos, root->sectorsize);
lockend = round_up(pos + *write_bytes, root->sectorsize) - 1;
lockstart = round_down(pos, fs_info->sectorsize);
lockend = round_up(pos + *write_bytes,
fs_info->sectorsize) - 1;
while (1) {
lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
@ -1509,6 +1524,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
loff_t pos)
{
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL;
struct extent_state *cached_state = NULL;
@ -1555,9 +1571,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
break;
}
sector_offset = pos & (root->sectorsize - 1);
sector_offset = pos & (fs_info->sectorsize - 1);
reserve_bytes = round_up(write_bytes + sector_offset,
root->sectorsize);
fs_info->sectorsize);
ret = btrfs_check_data_free_space(inode, pos, write_bytes);
if (ret < 0) {
@ -1577,7 +1593,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
PAGE_SIZE);
reserve_bytes = round_up(write_bytes +
sector_offset,
root->sectorsize);
fs_info->sectorsize);
} else {
break;
}
@ -1621,12 +1637,10 @@ again:
copied = btrfs_copy_from_user(pos, write_bytes, pages, i);
num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
reserve_bytes);
num_sectors = BTRFS_BYTES_TO_BLKS(fs_info, reserve_bytes);
dirty_sectors = round_up(copied + sector_offset,
root->sectorsize);
dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
dirty_sectors);
fs_info->sectorsize);
dirty_sectors = BTRFS_BYTES_TO_BLKS(fs_info, dirty_sectors);
/*
* if we have trouble faulting in the pages, fall
@ -1654,11 +1668,9 @@ again:
* managed to copy.
*/
if (num_sectors > dirty_sectors) {
/* release everything except the sectors we dirtied */
release_bytes -= dirty_sectors <<
root->fs_info->sb->s_blocksize_bits;
fs_info->sb->s_blocksize_bits;
if (copied > 0) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
@ -1670,7 +1682,8 @@ again:
} else {
u64 __pos;
__pos = round_down(pos, root->sectorsize) +
__pos = round_down(pos,
fs_info->sectorsize) +
(dirty_pages << PAGE_SHIFT);
btrfs_delalloc_release_space(inode, __pos,
release_bytes);
@ -1678,12 +1691,11 @@ again:
}
release_bytes = round_up(copied + sector_offset,
root->sectorsize);
fs_info->sectorsize);
if (copied > 0)
ret = btrfs_dirty_pages(root, inode, pages,
dirty_pages, pos, copied,
NULL);
ret = btrfs_dirty_pages(inode, pages, dirty_pages,
pos, copied, NULL);
if (need_unlock)
unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state,
@ -1698,8 +1710,10 @@ again:
btrfs_end_write_no_snapshoting(root);
if (only_release_metadata && copied > 0) {
lockstart = round_down(pos, root->sectorsize);
lockend = round_up(pos + copied, root->sectorsize) - 1;
lockstart = round_down(pos,
fs_info->sectorsize);
lockend = round_up(pos + copied,
fs_info->sectorsize) - 1;
set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
lockend, EXTENT_NORESERVE, NULL,
@ -1712,8 +1726,8 @@ again:
cond_resched();
balance_dirty_pages_ratelimited(inode->i_mapping);
if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1)
btrfs_btree_balance_dirty(root);
if (dirty_pages < (fs_info->nodesize >> PAGE_SHIFT) + 1)
btrfs_btree_balance_dirty(fs_info);
pos += copied;
num_written += copied;
@ -1727,7 +1741,7 @@ again:
btrfs_delalloc_release_metadata(inode, release_bytes);
} else {
btrfs_delalloc_release_space(inode,
round_down(pos, root->sectorsize),
round_down(pos, fs_info->sectorsize),
release_bytes);
}
}
@ -1798,6 +1812,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 start_pos;
u64 end_pos;
@ -1829,7 +1844,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
* although we have opened a file as writable, we have
* to stop this write operation to ensure FS consistency.
*/
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
inode_unlock(inode);
err = -EROFS;
goto out;
@ -1845,17 +1860,18 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
pos = iocb->ki_pos;
count = iov_iter_count(from);
start_pos = round_down(pos, root->sectorsize);
start_pos = round_down(pos, fs_info->sectorsize);
oldsize = i_size_read(inode);
if (start_pos > oldsize) {
/* Expand hole size to cover write data, preventing empty gap */
end_pos = round_up(pos + count, root->sectorsize);
end_pos = round_up(pos + count,
fs_info->sectorsize);
err = btrfs_cont_expand(inode, oldsize, end_pos);
if (err) {
inode_unlock(inode);
goto out;
}
if (start_pos > round_up(oldsize, root->sectorsize))
if (start_pos > round_up(oldsize, fs_info->sectorsize))
clean_page = 1;
}
@ -1935,6 +1951,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
struct dentry *dentry = file_dentry(file);
struct inode *inode = d_inode(dentry);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
struct btrfs_log_ctx ctx;
@ -2045,12 +2062,12 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* commit does not start nor waits for ordered extents to complete.
*/
smp_mb();
if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
if (btrfs_inode_in_log(inode, fs_info->generation) ||
(full_sync && BTRFS_I(inode)->last_trans <=
root->fs_info->last_trans_committed) ||
fs_info->last_trans_committed) ||
(!btrfs_have_ordered_extents_in_range(inode, start, len) &&
BTRFS_I(inode)->last_trans
<= root->fs_info->last_trans_committed)) {
<= fs_info->last_trans_committed)) {
/*
* We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever
@ -2129,7 +2146,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* which are indicated by ctx.io_err.
*/
if (ctx.io_err) {
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans);
ret = ctx.io_err;
goto out;
}
@ -2138,20 +2155,20 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!ret) {
ret = btrfs_sync_log(trans, root, &ctx);
if (!ret) {
ret = btrfs_end_transaction(trans, root);
ret = btrfs_end_transaction(trans);
goto out;
}
}
if (!full_sync) {
ret = btrfs_wait_ordered_range(inode, start, len);
if (ret) {
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans);
goto out;
}
}
ret = btrfs_commit_transaction(trans, root);
ret = btrfs_commit_transaction(trans);
} else {
ret = btrfs_end_transaction(trans, root);
ret = btrfs_end_transaction(trans);
}
out:
return ret > 0 ? -EIO : ret;
@ -2208,6 +2225,7 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
struct btrfs_path *path, u64 offset, u64 end)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_buffer *leaf;
struct btrfs_file_extent_item *fi;
@ -2216,7 +2234,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
struct btrfs_key key;
int ret;
if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
if (btrfs_fs_incompat(fs_info, NO_HOLES))
goto out;
key.objectid = btrfs_ino(inode);
@ -2224,9 +2242,15 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
key.offset = offset;
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret < 0)
if (ret <= 0) {
/*
* We should have dropped this offset, so if we find it then
* something has gone horribly wrong.
*/
if (ret == 0)
ret = -EINVAL;
return ret;
BUG_ON(!ret);
}
leaf = path->nodes[0];
if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
@ -2248,7 +2272,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
u64 num_bytes;
key.offset = offset;
btrfs_set_item_key_safe(root->fs_info, path, &key);
btrfs_set_item_key_safe(fs_info, path, &key);
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
@ -2284,7 +2308,7 @@ out:
hole_em->block_start = EXTENT_MAP_HOLE;
hole_em->block_len = 0;
hole_em->orig_block_len = 0;
hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
hole_em->bdev = fs_info->fs_devices->latest_bdev;
hole_em->compress_type = BTRFS_COMPRESS_NONE;
hole_em->generation = trans->transid;
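The fill_holes() change earlier in this file replaces a BUG_ON() with a plain error return; a toy sketch of that pattern (with a stand-in for btrfs_search_slot()'s negative/zero/positive return convention) shows how the unexpected exact match now surfaces as -EINVAL instead of a crash:

#include <stdio.h>
#include <errno.h>

static int search_slot(int simulate_found)
{
        /* 0: exact key found, 1: not found, <0: error */
        return simulate_found ? 0 : 1;
}

static int fill_hole(int simulate_found)
{
        int ret = search_slot(simulate_found);

        if (ret <= 0) {
                /* the offset should have been dropped already; finding
                 * an exact match means something went badly wrong */
                if (ret == 0)
                        ret = -EINVAL;
                return ret;
        }
        return 0;
}

int main(void)
{
        printf("unexpected hit -> %d\n", fill_hole(1));   /* -EINVAL */
        printf("normal case    -> %d\n", fill_hole(0));   /* 0 */
        return 0;
}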
@ -2336,6 +2360,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
@ -2347,13 +2372,13 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
u64 tail_len;
u64 orig_start = offset;
u64 cur_offset;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
u64 min_size = btrfs_calc_trans_metadata_size(fs_info, 1);
u64 drop_end;
int ret = 0;
int err = 0;
unsigned int rsv_count;
bool same_block;
bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
bool no_holes = btrfs_fs_incompat(fs_info, NO_HOLES);
u64 ino_size;
bool truncated_block = false;
bool updated_inode = false;
@ -2363,7 +2388,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
return ret;
inode_lock(inode);
ino_size = round_up(inode->i_size, root->sectorsize);
ino_size = round_up(inode->i_size, fs_info->sectorsize);
ret = find_first_non_hole(inode, &offset, &len);
if (ret < 0)
goto out_only_mutex;
@ -2373,11 +2398,11 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_only_mutex;
}
lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
lockstart = round_up(offset, btrfs_inode_sectorsize(inode));
lockend = round_down(offset + len,
BTRFS_I(inode)->root->sectorsize) - 1;
same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
== (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
btrfs_inode_sectorsize(inode)) - 1;
same_block = (BTRFS_BYTES_TO_BLKS(fs_info, offset))
== (BTRFS_BYTES_TO_BLKS(fs_info, offset + len - 1));
/*
* We needn't truncate any block which is beyond the end of the file
* because we are sure there is no data there.
@ -2386,7 +2411,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
* Only do this if we are in the same block and we aren't doing the
* entire block.
*/
if (same_block && len < root->sectorsize) {
if (same_block && len < fs_info->sectorsize) {
if (offset < ino_size) {
truncated_block = true;
ret = btrfs_truncate_block(inode, offset, len, 0);
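A small userspace sketch of the boundary math above, using an assumed 4096-byte sector size: the locked range is the sector-aligned interior of the punch, and when the request is smaller than one block that interior can be empty, which is exactly the same_block fast path taken here.

#include <stdio.h>

#define SECTORSIZE 4096ULL

static unsigned long long rnd_up(unsigned long long x)
{
        return (x + SECTORSIZE - 1) / SECTORSIZE * SECTORSIZE;
}

static unsigned long long rnd_down(unsigned long long x)
{
        return x / SECTORSIZE * SECTORSIZE;
}

int main(void)
{
        unsigned long long offset = 5000, len = 2000;

        unsigned long long lockstart = rnd_up(offset);
        unsigned long long lockend = rnd_down(offset + len) - 1;
        int same_block = (offset / SECTORSIZE) ==
                         ((offset + len - 1) / SECTORSIZE);

        printf("lock [%llu, %llu], same_block=%d\n",
               lockstart, lockend, same_block);
        return 0;
}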
@ -2489,12 +2514,12 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out;
}
rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
ret = -ENOMEM;
goto out_free;
}
rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
rsv->size = btrfs_calc_trans_metadata_size(fs_info, 1);
rsv->failfast = 1;
/*
@ -2509,7 +2534,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_free;
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
min_size, 0);
BUG_ON(ret);
trans->block_rsv = rsv;
@ -2523,12 +2548,19 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (ret != -ENOSPC)
break;
trans->block_rsv = &root->fs_info->trans_block_rsv;
trans->block_rsv = &fs_info->trans_block_rsv;
if (cur_offset < ino_size) {
if (cur_offset < drop_end && cur_offset < ino_size) {
ret = fill_holes(trans, inode, path, cur_offset,
drop_end);
if (ret) {
/*
* If we failed then we didn't insert our hole
* entries for the area we dropped, so now the
* fs is corrupted, so we must abort the
* transaction.
*/
btrfs_abort_transaction(trans, ret);
err = ret;
break;
}
@ -2542,8 +2574,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
break;
}
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
trans = btrfs_start_transaction(root, rsv_count);
if (IS_ERR(trans)) {
@ -2552,7 +2584,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
break;
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
@ -2571,7 +2603,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
goto out_trans;
}
trans->block_rsv = &root->fs_info->trans_block_rsv;
trans->block_rsv = &fs_info->trans_block_rsv;
/*
* If we are using the NO_HOLES feature we might have already had a
* hole that overlaps a part of the region [lockstart, lockend] and
@ -2593,6 +2625,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (cur_offset < ino_size && cur_offset < drop_end) {
ret = fill_holes(trans, inode, path, cur_offset, drop_end);
if (ret) {
/* Same comment as above. */
btrfs_abort_transaction(trans, ret);
err = ret;
goto out_trans;
}
@ -2605,14 +2639,14 @@ out_trans:
inode_inc_iversion(inode);
inode->i_mtime = inode->i_ctime = current_time(inode);
trans->block_rsv = &root->fs_info->trans_block_rsv;
trans->block_rsv = &fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
updated_inode = true;
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
btrfs_end_transaction(trans);
btrfs_btree_balance_dirty(fs_info);
out_free:
btrfs_free_path(path);
btrfs_free_block_rsv(root, rsv);
btrfs_free_block_rsv(fs_info, rsv);
out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS);
@ -2630,7 +2664,7 @@ out_only_mutex:
err = PTR_ERR(trans);
} else {
err = btrfs_update_inode(trans, root, inode);
ret = btrfs_end_transaction(trans, root);
ret = btrfs_end_transaction(trans);
}
}
inode_unlock(inode);
@ -2695,7 +2729,7 @@ static long btrfs_fallocate(struct file *file, int mode,
u64 locked_end;
u64 actual_end = 0;
struct extent_map *em;
int blocksize = BTRFS_I(inode)->root->sectorsize;
int blocksize = btrfs_inode_sectorsize(inode);
int ret;
alloc_start = round_down(offset, blocksize);
@ -2872,9 +2906,9 @@ static long btrfs_fallocate(struct file *file, int mode,
btrfs_ordered_update_i_size(inode, actual_end, NULL);
ret = btrfs_update_inode(trans, root, inode);
if (ret)
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans);
else
ret = btrfs_end_transaction(trans, root);
ret = btrfs_end_transaction(trans);
}
}
out_unlock:
@ -2891,7 +2925,7 @@ out:
static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
u64 lockstart;
@ -2909,10 +2943,11 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
*/
start = max_t(loff_t, 0, *offset);
lockstart = round_down(start, root->sectorsize);
lockend = round_up(i_size_read(inode), root->sectorsize);
lockstart = round_down(start, fs_info->sectorsize);
lockend = round_up(i_size_read(inode),
fs_info->sectorsize);
if (lockend <= lockstart)
lockend = lockstart + root->sectorsize;
lockend = lockstart + fs_info->sectorsize;
lockend--;
len = lockend - lockstart + 1;

View file

@ -42,11 +42,16 @@ static int link_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info);
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_io_ctl *io_ctl,
struct btrfs_path *path);
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_path *path,
u64 offset)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_key key;
struct btrfs_key location;
struct btrfs_disk_key disk_key;
@ -74,9 +79,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
btrfs_disk_key_to_cpu(&location, &disk_key);
btrfs_release_path(path);
inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
if (!inode)
return ERR_PTR(-ENOENT);
inode = btrfs_iget(fs_info->sb, &location, root, NULL);
if (IS_ERR(inode))
return inode;
if (is_bad_inode(inode)) {
@ -96,6 +99,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
*block_group, struct btrfs_path *path)
{
struct inode *inode = NULL;
struct btrfs_fs_info *fs_info = root->fs_info;
u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
spin_lock(&block_group->lock);
@ -112,8 +116,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
spin_lock(&block_group->lock);
if (!((BTRFS_I(inode)->flags & flags) == flags)) {
btrfs_info(root->fs_info,
"Old style space inode found, converting.");
btrfs_info(fs_info, "Old style space inode found, converting.");
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
BTRFS_INODE_NODATACOW;
block_group->disk_cache_state = BTRFS_DC_CLEAR;
@ -153,7 +156,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
inode_item = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_inode_item);
btrfs_item_key(leaf, &disk_key, path->slots[0]);
memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
memzero_extent_buffer(leaf, (unsigned long)inode_item,
sizeof(*inode_item));
btrfs_set_inode_generation(leaf, inode_item, trans->transid);
btrfs_set_inode_size(leaf, inode_item, 0);
@ -181,7 +184,7 @@ static int __create_free_space_inode(struct btrfs_root *root,
leaf = path->nodes[0];
header = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_free_space_header);
memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
@ -205,15 +208,15 @@ int create_free_space_inode(struct btrfs_root *root,
block_group->key.objectid);
}
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
u64 needed_bytes;
int ret;
/* 1 for slack space, 1 for updating the inode */
needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
btrfs_calc_trans_metadata_size(root, 1);
needed_bytes = btrfs_calc_trunc_metadata_size(fs_info, 1) +
btrfs_calc_trans_metadata_size(fs_info, 1);
spin_lock(&rsv->lock);
if (rsv->reserved < needed_bytes)
@ -244,9 +247,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
if (!list_empty(&block_group->io_list)) {
list_del_init(&block_group->io_list);
btrfs_wait_cache_io(root, trans, block_group,
&block_group->io_ctl, path,
block_group->key.objectid);
btrfs_wait_cache_io(trans, block_group, path);
btrfs_put_block_group(block_group);
}
@ -305,7 +306,7 @@ static int readahead_cache(struct inode *inode)
}
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
struct btrfs_root *root, int write)
int write)
{
int num_pages;
int check_crcs = 0;
@ -327,7 +328,7 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
return -ENOMEM;
io_ctl->num_pages = num_pages;
io_ctl->root = root;
io_ctl->fs_info = btrfs_sb(inode->i_sb);
io_ctl->check_crcs = check_crcs;
io_ctl->inode = inode;
@ -450,7 +451,7 @@ static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
gen = io_ctl->cur;
if (le64_to_cpu(*gen) != generation) {
btrfs_err_rl(io_ctl->root->fs_info,
btrfs_err_rl(io_ctl->fs_info,
"space cache generation (%llu) does not match inode (%llu)",
*gen, generation);
io_ctl_unmap_page(io_ctl);
@ -476,7 +477,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
crc = btrfs_csum_data(io_ctl->orig + offset, crc,
PAGE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
btrfs_csum_final(crc, (u8 *)&crc);
io_ctl_unmap_page(io_ctl);
tmp = page_address(io_ctl->pages[0]);
tmp += index;
@ -504,9 +505,9 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
io_ctl_map_page(io_ctl, 0);
crc = btrfs_csum_data(io_ctl->orig + offset, crc,
PAGE_SIZE - offset);
btrfs_csum_final(crc, (char *)&crc);
btrfs_csum_final(crc, (u8 *)&crc);
if (val != crc) {
btrfs_err_rl(io_ctl->root->fs_info,
btrfs_err_rl(io_ctl->fs_info,
"csum mismatch on free space cache");
io_ctl_unmap_page(io_ctl);
return -EIO;
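A hedged sketch of the page checksum scheme these hunks touch: the checksum is computed over the page minus the header region that stores the checksums themselves, then recomputed and compared on read. A toy hash stands in for btrfs_csum_data()/CRC32C, and the page layout below is an assumption for illustration, not the real cache format.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static unsigned int toy_csum(const unsigned char *buf, size_t len)
{
        unsigned int c = ~0u;

        while (len--)
                c = c * 31 + *buf++;
        return c;
}

int main(void)
{
        unsigned char page[PAGE_SIZE];
        size_t offset = sizeof(unsigned int);   /* header holding the csum */
        unsigned int val, crc;

        memset(page, 0xab, sizeof(page));

        /* writer side: checksum the payload, store it in the header */
        val = toy_csum(page + offset, PAGE_SIZE - offset);
        memcpy(page, &val, sizeof(val));

        /* reader side: recompute and compare, as io_ctl_check_crc() does */
        memcpy(&val, page, sizeof(val));
        crc = toy_csum(page + offset, PAGE_SIZE - offset);
        printf(crc == val ? "csum ok\n" : "csum mismatch\n");
        return 0;
}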
@ -669,6 +670,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
struct btrfs_path *path, u64 offset)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_free_space_header *header;
struct extent_buffer *leaf;
struct btrfs_io_ctl io_ctl;
@ -708,23 +710,23 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
btrfs_release_path(path);
if (!BTRFS_I(inode)->generation) {
btrfs_info(root->fs_info,
btrfs_info(fs_info,
"The free space cache file (%llu) is invalid. skip it\n",
offset);
return 0;
}
if (BTRFS_I(inode)->generation != generation) {
btrfs_err(root->fs_info,
"free space inode generation (%llu) did not match free space cache generation (%llu)",
BTRFS_I(inode)->generation, generation);
btrfs_err(fs_info,
"free space inode generation (%llu) did not match free space cache generation (%llu)",
BTRFS_I(inode)->generation, generation);
return 0;
}
if (!num_entries)
return 0;
ret = io_ctl_init(&io_ctl, inode, root, 0);
ret = io_ctl_init(&io_ctl, inode, 0);
if (ret)
return ret;
@ -766,7 +768,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ret = link_free_space(ctl, e);
spin_unlock(&ctl->tree_lock);
if (ret) {
btrfs_err(root->fs_info,
btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
@ -786,7 +788,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
ctl->op->recalc_thresholds(ctl);
spin_unlock(&ctl->tree_lock);
if (ret) {
btrfs_err(root->fs_info,
btrfs_err(fs_info,
"Duplicate entries in free space cache, dumping");
kmem_cache_free(btrfs_free_space_cachep, e);
goto free_cache;
@ -1033,7 +1035,7 @@ fail:
}
static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
write_pinned_extent_entries(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_io_ctl *io_ctl,
int *entries)
@ -1052,7 +1054,7 @@ write_pinned_extent_entries(struct btrfs_root *root,
* We shouldn't have switched the pinned extents yet so this is the
* right one
*/
unpin = root->fs_info->pinned_extents;
unpin = fs_info->pinned_extents;
start = block_group->key.objectid;
@ -1135,20 +1137,20 @@ cleanup_write_cache_enospc(struct inode *inode,
GFP_NOFS);
}
int btrfs_wait_cache_io(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_io_ctl *io_ctl,
struct btrfs_path *path, u64 offset)
static int __btrfs_wait_cache_io(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_io_ctl *io_ctl,
struct btrfs_path *path, u64 offset)
{
int ret;
struct inode *inode = io_ctl->inode;
struct btrfs_fs_info *fs_info;
if (!inode)
return 0;
if (block_group)
root = root->fs_info->tree_root;
fs_info = btrfs_sb(inode->i_sb);
/* Flush the dirty pages in the cache file. */
ret = flush_dirty_cache(inode);
@ -1165,9 +1167,9 @@ out:
BTRFS_I(inode)->generation = 0;
if (block_group) {
#ifdef DEBUG
btrfs_err(root->fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
btrfs_err(fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
#endif
}
}
@ -1200,6 +1202,23 @@ out:
}
static int btrfs_wait_cache_io_root(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
struct btrfs_io_ctl *io_ctl,
struct btrfs_path *path)
{
return __btrfs_wait_cache_io(root, trans, NULL, io_ctl, path, 0);
}
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path)
{
return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
block_group, &block_group->io_ctl,
path, block_group->key.objectid);
}
/**
* __btrfs_write_out_cache - write out cached info to an inode
* @root - the root the inode belongs to
@ -1220,6 +1239,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_trans_handle *trans,
struct btrfs_path *path, u64 offset)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_state *cached_state = NULL;
LIST_HEAD(bitmap_list);
int entries = 0;
@ -1231,7 +1251,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
return -EIO;
WARN_ON(io_ctl->pages);
ret = io_ctl_init(io_ctl, inode, root, 1);
ret = io_ctl_init(io_ctl, inode, 1);
if (ret)
return ret;
@ -1277,7 +1297,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
* If this changes while we are working we'll get added back to
* the dirty list and redo it. No locking needed
*/
ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
ret = write_pinned_extent_entries(fs_info, block_group,
io_ctl, &entries);
if (ret)
goto out_nospc_locked;
@ -1296,8 +1317,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
io_ctl_zero_remaining_pages(io_ctl);
/* Everything is written out, now we dirty the pages in the file. */
ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
0, i_size_read(inode), &cached_state);
ret = btrfs_dirty_pages(inode, io_ctl->pages, io_ctl->num_pages, 0,
i_size_read(inode), &cached_state);
if (ret)
goto out_nospc;
@ -1352,17 +1373,16 @@ out_nospc:
goto out;
}
int btrfs_write_out_cache(struct btrfs_root *root,
int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path)
{
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct inode *inode;
int ret = 0;
root = root->fs_info->tree_root;
spin_lock(&block_group->lock);
if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
spin_unlock(&block_group->lock);
@ -1379,9 +1399,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
path, block_group->key.objectid);
if (ret) {
#ifdef DEBUG
btrfs_err(root->fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
btrfs_err(fs_info,
"failed to write free space cache for block group %llu",
block_group->key.objectid);
#endif
spin_lock(&block_group->lock);
block_group->disk_cache_state = BTRFS_DC_ERROR;
@ -1968,11 +1988,11 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info)
{
struct btrfs_block_group_cache *block_group = ctl->private;
struct btrfs_fs_info *fs_info = block_group->fs_info;
bool forced = false;
#ifdef CONFIG_BTRFS_DEBUG
if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
block_group))
if (btrfs_should_fragment_free_space(block_group))
forced = true;
#endif
@ -1988,7 +2008,7 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
* of cache left then go ahead and add them, no sense in adding
* the overhead of a bitmap if we don't have to.
*/
if (info->bytes <= block_group->sectorsize * 4) {
if (info->bytes <= fs_info->sectorsize * 4) {
if (ctl->free_extents * 2 <= ctl->extents_thresh)
return false;
} else {
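For orientation, a simplified standalone version of the small-chunk rule in this hunk (the real use_bitmap() has more conditions than shown here): chunks no bigger than four sectors stay as plain extent entries while extent entries are still plentiful, and only get pushed into a bitmap once the entry budget tightens.

#include <stdio.h>
#include <stdbool.h>

static bool use_bitmap(unsigned long long bytes,
                       unsigned int free_extents,
                       unsigned int extents_thresh,
                       unsigned int sectorsize)
{
        if (bytes <= (unsigned long long)sectorsize * 4) {
                /* tiny chunk: keep it as an extent while entries are cheap */
                if (free_extents * 2 <= extents_thresh)
                        return false;
        }
        return true;
}

int main(void)
{
        printf("%d\n", use_bitmap(8192, 10, 100, 4096));  /* 0: extent */
        printf("%d\n", use_bitmap(8192, 80, 100, 4096));  /* 1: bitmap */
        return 0;
}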
@ -2447,6 +2467,7 @@ out:
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
struct rb_node *n;
@ -2456,23 +2477,23 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes && !block_group->ro)
count++;
btrfs_crit(block_group->fs_info,
"entry offset %llu, bytes %llu, bitmap %s",
btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
info->offset, info->bytes,
(info->bitmap) ? "yes" : "no");
}
btrfs_info(block_group->fs_info, "block group has cluster?: %s",
btrfs_info(fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes");
btrfs_info(block_group->fs_info,
btrfs_info(fs_info,
"%d blocks of free space at or bigger than bytes is", count);
}
void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
spin_lock_init(&ctl->tree_lock);
ctl->unit = block_group->sectorsize;
ctl->unit = fs_info->sectorsize;
ctl->start = block_group->key.objectid;
ctl->private = block_group;
ctl->op = &free_space_op;
@ -3011,7 +3032,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
* returns zero and sets up cluster if things worked out, otherwise
* it returns -enospc
*/
int btrfs_find_space_cluster(struct btrfs_root *root,
int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size)
@ -3029,14 +3050,14 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
* For metadata, allow allocates with smaller extents. For
* data, keep it dense.
*/
if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) {
if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
cont1_bytes = min_bytes = bytes + empty_size;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
cont1_bytes = bytes;
min_bytes = block_group->sectorsize;
min_bytes = fs_info->sectorsize;
} else {
cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
min_bytes = block_group->sectorsize;
min_bytes = fs_info->sectorsize;
}
spin_lock(&ctl->tree_lock);
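A compact sketch of the sizing policy just above, with invented flag values: SSD_SPREAD demands one contiguous run, metadata block groups accept sector-sized fragments, and data keeps cont1_bytes dense at a quarter of the request or more.

#include <stdio.h>

#define OPT_SSD_SPREAD  0x1
#define GROUP_METADATA  0x2

int main(void)
{
        unsigned long long bytes = 1 << 20, empty_size = 1 << 16;
        unsigned long long sectorsize = 4096;
        unsigned int mount_opts = 0, group_flags = GROUP_METADATA;
        unsigned long long cont1_bytes, min_bytes;

        if (mount_opts & OPT_SSD_SPREAD) {
                cont1_bytes = min_bytes = bytes + empty_size;
        } else if (group_flags & GROUP_METADATA) {
                cont1_bytes = bytes;
                min_bytes = sectorsize;
        } else {
                cont1_bytes = bytes > (bytes + empty_size) / 4 ?
                              bytes : (bytes + empty_size) / 4;
                min_bytes = sectorsize;
        }

        printf("cont1=%llu min=%llu\n", cont1_bytes, min_bytes);
        return 0;
}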
@ -3124,8 +3145,7 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
ret = btrfs_discard_extent(fs_info->extent_root,
start, bytes, &trimmed);
ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
if (!ret)
*total_trimmed += trimmed;
@ -3321,6 +3341,7 @@ void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_map_tree *em_tree;
struct extent_map *em;
bool cleanup;
@ -3331,8 +3352,8 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
spin_unlock(&block_group->lock);
if (cleanup) {
lock_chunks(block_group->fs_info->chunk_root);
em_tree = &block_group->fs_info->mapping_tree.map_tree;
mutex_lock(&fs_info->chunk_mutex);
em_tree = &fs_info->mapping_tree.map_tree;
write_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, block_group->key.objectid,
1);
@ -3343,7 +3364,7 @@ void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
*/
remove_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
unlock_chunks(block_group->fs_info->chunk_root);
mutex_unlock(&fs_info->chunk_mutex);
/* once for us and once for the tree */
free_extent_map(em);
@ -3473,7 +3494,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
int ret = 0;
u64 root_gen = btrfs_root_generation(&root->root_item);
if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
/*
@ -3512,12 +3533,13 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_path *path,
struct inode *inode)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
int ret;
struct btrfs_io_ctl io_ctl;
bool release_metadata = true;
if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
memset(&io_ctl, 0, sizeof(io_ctl));
@ -3531,16 +3553,16 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
* with or without an error.
*/
release_metadata = false;
ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
ret = btrfs_wait_cache_io_root(root, trans, &io_ctl, path);
}
if (ret) {
if (release_metadata)
btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
btrfs_err(root->fs_info,
"failed to write free ino cache for root %llu",
root->root_key.objectid);
btrfs_err(fs_info,
"failed to write free ino cache for root %llu",
root->root_key.objectid);
#endif
}

View file

@ -59,7 +59,7 @@ int create_free_space_inode(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
@ -67,12 +67,10 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
struct inode *inode);
int load_free_space_cache(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group);
int btrfs_wait_cache_io(struct btrfs_root *root,
struct btrfs_trans_handle *trans,
int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_io_ctl *io_ctl,
struct btrfs_path *path, u64 offset);
int btrfs_write_out_cache(struct btrfs_root *root,
struct btrfs_path *path);
int btrfs_write_out_cache(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
struct btrfs_block_group_cache *block_group,
struct btrfs_path *path);
@ -111,7 +109,7 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
u64 bytes);
int btrfs_find_space_cluster(struct btrfs_root *root,
int btrfs_find_space_cluster(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group,
struct btrfs_free_cluster *cluster,
u64 offset, u64 bytes, u64 empty_size);

View File

@ -39,7 +39,7 @@ void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
* We convert to bitmaps when the disk space required for using extents
* exceeds that required for using bitmaps.
*/
bitmap_range = cache->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
bitmap_range);
bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
@ -189,7 +189,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
int ret;
bitmap_size = free_space_bitmap_size(block_group->key.offset,
block_group->sectorsize);
fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
ret = -ENOMEM;
@ -227,9 +227,9 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
ASSERT(found_key.objectid + found_key.offset <= end);
first = div_u64(found_key.objectid - start,
block_group->sectorsize);
fs_info->sectorsize);
last = div_u64(found_key.objectid + found_key.offset - start,
block_group->sectorsize);
fs_info->sectorsize);
le_bitmap_set(bitmap, first, last - first);
extent_count++;
@ -270,7 +270,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
}
bitmap_cursor = bitmap;
bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
bitmap_range = fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
i = start;
while (i < end) {
unsigned long ptr;
@ -279,7 +279,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
extent_size = min(end - i, bitmap_range);
data_size = free_space_bitmap_size(extent_size,
block_group->sectorsize);
fs_info->sectorsize);
key.objectid = i;
key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
@ -330,7 +330,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
int ret;
bitmap_size = free_space_bitmap_size(block_group->key.offset,
block_group->sectorsize);
fs_info->sectorsize);
bitmap = alloc_bitmap(bitmap_size);
if (!bitmap) {
ret = -ENOMEM;
@ -370,11 +370,11 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
ASSERT(found_key.objectid + found_key.offset <= end);
bitmap_pos = div_u64(found_key.objectid - start,
block_group->sectorsize *
fs_info->sectorsize *
BITS_PER_BYTE);
bitmap_cursor = bitmap + bitmap_pos;
data_size = free_space_bitmap_size(found_key.offset,
block_group->sectorsize);
fs_info->sectorsize);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
read_extent_buffer(leaf, bitmap_cursor, ptr,
@ -425,7 +425,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
extent_count++;
}
prev_bit = bit;
offset += block_group->sectorsize;
offset += fs_info->sectorsize;
bitnr++;
}
if (prev_bit == 1) {
@ -517,7 +517,8 @@ int free_space_test_bit(struct btrfs_block_group_cache *block_group,
ASSERT(offset >= found_start && offset < found_end);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
i = div_u64(offset - found_start, block_group->sectorsize);
i = div_u64(offset - found_start,
block_group->fs_info->sectorsize);
return !!extent_buffer_test_bit(leaf, ptr, i);
}
@ -525,6 +526,7 @@ static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
struct btrfs_path *path, u64 *start, u64 *size,
int bit)
{
struct btrfs_fs_info *fs_info = block_group->fs_info;
struct extent_buffer *leaf;
struct btrfs_key key;
u64 end = *start + *size;
@ -544,8 +546,8 @@ static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
end = found_end;
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
first = div_u64(*start - found_start, block_group->sectorsize);
last = div_u64(end - found_start, block_group->sectorsize);
first = div_u64(*start - found_start, fs_info->sectorsize);
last = div_u64(end - found_start, fs_info->sectorsize);
if (bit)
extent_buffer_bitmap_set(leaf, ptr, first, last - first);
else
@ -606,7 +608,7 @@ static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
* that block is within the block group.
*/
if (start > block_group->key.objectid) {
u64 prev_block = start - block_group->sectorsize;
u64 prev_block = start - block_group->fs_info->sectorsize;
key.objectid = prev_block;
key.type = (u8)-1;
@ -1121,7 +1123,7 @@ static int populate_free_space_tree(struct btrfs_trans_handle *trans,
}
start = key.objectid;
if (key.type == BTRFS_METADATA_ITEM_KEY)
start += fs_info->tree_root->nodesize;
start += fs_info->nodesize;
else
start += key.offset;
} else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
@ -1187,7 +1189,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID);
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
ret = btrfs_commit_transaction(trans, tree_root);
ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
@ -1196,7 +1198,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
abort:
clear_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags);
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
btrfs_end_transaction(trans);
return ret;
}
@ -1267,7 +1269,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
list_del(&free_space_root->dirty_list);
btrfs_tree_lock(free_space_root->node);
clean_tree_block(trans, tree_root->fs_info, free_space_root->node);
clean_tree_block(trans, fs_info, free_space_root->node);
btrfs_tree_unlock(free_space_root->node);
btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
0, 1);
@ -1276,7 +1278,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
free_extent_buffer(free_space_root->commit_root);
kfree(free_space_root);
ret = btrfs_commit_transaction(trans, tree_root);
ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
@ -1284,7 +1286,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
abort:
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
btrfs_end_transaction(trans);
return ret;
}
@ -1473,7 +1475,7 @@ static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
extent_count++;
}
prev_bit = bit;
offset += block_group->sectorsize;
offset += fs_info->sectorsize;
}
}
if (prev_bit == 1) {

View File

@ -182,7 +182,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
btrfs_truncate_item(root, path, item_size - del_len, 1);
btrfs_truncate_item(root->fs_info, path, item_size - del_len, 1);
out:
btrfs_free_path(path);
@ -245,7 +245,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
btrfs_truncate_item(root, path, item_size - sub_item_len, 1);
btrfs_truncate_item(root->fs_info, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
@ -297,7 +297,7 @@ static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
name, name_len, NULL))
goto out;
btrfs_extend_item(root, path, ins_len);
btrfs_extend_item(root->fs_info, path, ins_len);
ret = 0;
}
if (ret < 0)
@ -328,6 +328,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 index)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_inode_ref *ref;
@ -354,7 +355,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
goto out;
old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
btrfs_extend_item(root, path, ins_len);
btrfs_extend_item(fs_info, path, ins_len);
ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_ref);
ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
@ -384,7 +385,7 @@ out:
btrfs_free_path(path);
if (ret == -EMLINK) {
struct btrfs_super_block *disk_super = root->fs_info->super_copy;
struct btrfs_super_block *disk_super = fs_info->super_copy;
/* We ran out of space in the ref array. Need to
* add an extended ref. */
if (btrfs_super_incompat_flags(disk_super)

View File

@ -38,7 +38,7 @@ static int caching_kthread(void *data)
int slot;
int ret;
if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
path = btrfs_alloc_path();
@ -180,7 +180,7 @@ static void start_caching(struct btrfs_root *root)
if (IS_ERR(tsk)) {
btrfs_warn(fs_info, "failed to start inode caching task");
btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
"disabling inode map caching");
"disabling inode map caching");
}
}
@ -395,6 +395,7 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
int btrfs_save_ino_cache(struct btrfs_root *root,
struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
struct btrfs_path *path;
struct inode *inode;
@ -415,7 +416,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
if (btrfs_root_refs(&root->root_item) == 0)
return 0;
if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
return 0;
path = btrfs_alloc_path();
@ -423,7 +424,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
return -ENOMEM;
rsv = trans->block_rsv;
trans->block_rsv = &root->fs_info->trans_block_rsv;
trans->block_rsv = &fs_info->trans_block_rsv;
num_bytes = trans->bytes_reserved;
/*
@ -433,14 +434,14 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
* 1 item for free space object
* 3 items for pre-allocation
*/
trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
trans->bytes_reserved = btrfs_calc_trans_metadata_size(fs_info, 10);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
trans->bytes_reserved,
BTRFS_RESERVE_NO_FLUSH);
if (ret)
goto out;
trace_btrfs_space_reservation(root->fs_info, "ino_cache",
trans->transid, trans->bytes_reserved, 1);
trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
trans->bytes_reserved, 1);
again:
inode = lookup_free_ino_inode(root, path);
if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
@ -506,9 +507,10 @@ again:
out_put:
iput(inode);
out_release:
trace_btrfs_space_reservation(root->fs_info, "ino_cache",
trans->transid, trans->bytes_reserved, 0);
btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
trans->bytes_reserved, 0);
btrfs_block_rsv_release(fs_info, trans->block_rsv,
trans->bytes_reserved);
out:
trans->block_rsv = rsv;
trans->bytes_reserved = num_bytes;

File diff suppressed because it is too large. Load Diff

File diff suppressed because it is too large. Load Diff

View File

@ -254,25 +254,21 @@ out:
return ret;
}
static int lzo_decompress_biovec(struct list_head *ws,
static int lzo_decompress_bio(struct list_head *ws,
struct page **pages_in,
u64 disk_start,
struct bio_vec *bvec,
int vcnt,
struct bio *orig_bio,
size_t srclen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
int ret = 0, ret2;
char *data_in;
unsigned long page_in_index = 0;
unsigned long page_out_index = 0;
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long buf_offset = 0;
unsigned long bytes;
unsigned long working_bytes;
unsigned long pg_offset;
size_t in_len;
size_t out_len;
unsigned long in_offset;
@ -292,7 +288,6 @@ static int lzo_decompress_biovec(struct list_head *ws,
in_page_bytes_left = PAGE_SIZE - LZO_LEN;
tot_out = 0;
pg_offset = 0;
while (tot_in < tot_len) {
in_len = read_compress_length(data_in + in_offset);
@ -365,16 +360,14 @@ cont:
tot_out += out_len;
ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
tot_out, disk_start,
bvec, vcnt,
&page_out_index, &pg_offset);
tot_out, disk_start, orig_bio);
if (ret2 == 0)
break;
}
done:
kunmap(pages_in[page_in_index]);
if (!ret)
btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
zero_fill_bio(orig_bio);
return ret;
}
@ -438,6 +431,6 @@ const struct btrfs_compress_op btrfs_lzo_compress = {
.alloc_workspace = lzo_alloc_workspace,
.free_workspace = lzo_free_workspace,
.compress_pages = lzo_compress_pages,
.decompress_biovec = lzo_decompress_biovec,
.decompress_bio = lzo_decompress_bio,
.decompress = lzo_decompress,
};
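/*
 * Editor's note (not part of the commit): the switch from
 * ->decompress_biovec to ->decompress_bio retires the manual
 * (bvec, vcnt, page_out_index, pg_offset) bookkeeping; the copy helper
 * btrfs_decompress_buf2page() now walks orig_bio itself, and whatever
 * tail decompression could not fill is cleared in one call:
 *
 *	if (!ret)
 *		zero_fill_bio(orig_bio);
 */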

View File

@ -186,6 +186,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
u64 start, u64 len, u64 disk_len,
int type, int dio, int compress_type)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
@ -234,11 +235,10 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
&root->ordered_extents);
root->nr_ordered_extents++;
if (root->nr_ordered_extents == 1) {
spin_lock(&root->fs_info->ordered_root_lock);
spin_lock(&fs_info->ordered_root_lock);
BUG_ON(!list_empty(&root->ordered_root));
list_add_tail(&root->ordered_root,
&root->fs_info->ordered_roots);
spin_unlock(&root->fs_info->ordered_root_lock);
list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
spin_unlock(&fs_info->ordered_root_lock);
}
spin_unlock(&root->ordered_extent_lock);
@ -303,6 +303,7 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
struct btrfs_ordered_extent **cached,
u64 *file_offset, u64 io_size, int uptodate)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_inode_tree *tree;
struct rb_node *node;
struct btrfs_ordered_extent *entry = NULL;
@ -331,14 +332,14 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
entry->len);
*file_offset = dec_end;
if (dec_start > dec_end) {
btrfs_crit(BTRFS_I(inode)->root->fs_info,
"bad ordering dec_start %llu end %llu", dec_start, dec_end);
btrfs_crit(fs_info, "bad ordering dec_start %llu end %llu",
dec_start, dec_end);
}
to_dec = dec_end - dec_start;
if (to_dec > entry->bytes_left) {
btrfs_crit(BTRFS_I(inode)->root->fs_info,
"bad ordered accounting left %llu size %llu",
entry->bytes_left, to_dec);
btrfs_crit(fs_info,
"bad ordered accounting left %llu size %llu",
entry->bytes_left, to_dec);
}
entry->bytes_left -= to_dec;
if (!uptodate)
@ -588,6 +589,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
void btrfs_remove_ordered_extent(struct inode *inode,
struct btrfs_ordered_extent *entry)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_ordered_inode_tree *tree;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct rb_node *node;
@ -618,11 +620,11 @@ void btrfs_remove_ordered_extent(struct inode *inode,
* lock, so be nice and check if trans is set, but ASSERT() so
* if it isn't set a developer will notice.
*/
spin_lock(&root->fs_info->trans_lock);
trans = root->fs_info->running_transaction;
spin_lock(&fs_info->trans_lock);
trans = fs_info->running_transaction;
if (trans)
atomic_inc(&trans->use_count);
spin_unlock(&root->fs_info->trans_lock);
spin_unlock(&fs_info->trans_lock);
ASSERT(trans);
if (trans) {
@ -639,10 +641,10 @@ void btrfs_remove_ordered_extent(struct inode *inode,
trace_btrfs_ordered_extent_remove(inode, entry);
if (!root->nr_ordered_extents) {
spin_lock(&root->fs_info->ordered_root_lock);
spin_lock(&fs_info->ordered_root_lock);
BUG_ON(list_empty(&root->ordered_root));
list_del_init(&root->ordered_root);
spin_unlock(&root->fs_info->ordered_root_lock);
spin_unlock(&fs_info->ordered_root_lock);
}
spin_unlock(&root->ordered_extent_lock);
wake_up(&entry->wait);
@ -664,6 +666,7 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
const u64 range_start, const u64 range_len)
{
struct btrfs_fs_info *fs_info = root->fs_info;
LIST_HEAD(splice);
LIST_HEAD(skipped);
LIST_HEAD(works);
@ -694,8 +697,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr,
btrfs_flush_delalloc_helper,
btrfs_run_ordered_extent_work, NULL, NULL);
list_add_tail(&ordered->work_list, &works);
btrfs_queue_work(root->fs_info->flush_workers,
&ordered->flush_work);
btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);
cond_resched();
spin_lock(&root->ordered_extent_lock);
@ -978,7 +980,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
ordered->file_offset +
ordered->truncated_len);
} else {
offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
offset = ALIGN(offset, btrfs_inode_sectorsize(inode));
}
disk_i_size = BTRFS_I(inode)->disk_i_size;
@ -1087,7 +1089,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
unsigned long num_sectors;
unsigned long i;
u32 sectorsize = BTRFS_I(inode)->root->sectorsize;
u32 sectorsize = btrfs_inode_sectorsize(inode);
int index = 0;
ordered = btrfs_lookup_ordered_extent(inode, offset);

View File

@ -145,10 +145,10 @@ struct btrfs_ordered_extent {
* calculates the total size you need to allocate for an ordered sum
* structure spanning 'bytes' in the file
*/
static inline int btrfs_ordered_sum_size(struct btrfs_root *root,
static inline int btrfs_ordered_sum_size(struct btrfs_fs_info *fs_info,
unsigned long bytes)
{
int num_sectors = (int)DIV_ROUND_UP(bytes, root->sectorsize);
int num_sectors = (int)DIV_ROUND_UP(bytes, fs_info->sectorsize);
return sizeof(struct btrfs_ordered_sum) + num_sectors * sizeof(u32);
}
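/*
 * Editor's sketch (not part of the commit): a worked example of the
 * calculation above, assuming a 4KiB sectorsize and crc32c checksums
 * (one u32 per sector). For a 16KiB ordered extent:
 *
 *	num_sectors = DIV_ROUND_UP(16384, 4096) = 4
 *	size = sizeof(struct btrfs_ordered_sum) + 4 * sizeof(u32)
 *
 * i.e. the per-sector checksum array is allocated inline, directly
 * after the fixed-size header.
 */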

View File

@ -161,7 +161,7 @@ static void print_uuid_item(struct extent_buffer *l, unsigned long offset,
}
}
void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l)
{
int i;
u32 type, nr;
@ -182,8 +182,9 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
nr = btrfs_header_nritems(l);
btrfs_info(root->fs_info, "leaf %llu total ptrs %d free space %d",
btrfs_header_bytenr(l), nr, btrfs_leaf_free_space(root, l));
btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d",
btrfs_header_bytenr(l), nr,
btrfs_leaf_free_space(fs_info, l));
for (i = 0 ; i < nr ; i++) {
item = btrfs_item_nr(i);
btrfs_item_key_to_cpu(l, &key, i);
@ -314,7 +315,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
}
}
void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c)
{
int i; u32 nr;
struct btrfs_key key;
@ -325,13 +326,13 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
nr = btrfs_header_nritems(c);
level = btrfs_header_level(c);
if (level == 0) {
btrfs_print_leaf(root, c);
btrfs_print_leaf(fs_info, c);
return;
}
btrfs_info(root->fs_info,
btrfs_info(fs_info,
"node %llu level %d total ptrs %d free spc %u",
btrfs_header_bytenr(c), level, nr,
(u32)BTRFS_NODEPTRS_PER_BLOCK(root) - nr);
(u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr);
for (i = 0; i < nr; i++) {
btrfs_node_key_to_cpu(c, &key, i);
pr_info("\tkey %d (%llu %u %llu) block %llu\n",
@ -339,7 +340,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
btrfs_node_blockptr(c, i));
}
for (i = 0; i < nr; i++) {
struct extent_buffer *next = read_tree_block(root,
struct extent_buffer *next = read_tree_block(fs_info,
btrfs_node_blockptr(c, i),
btrfs_node_ptr_generation(c, i));
if (IS_ERR(next)) {
@ -355,7 +356,7 @@ void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c)
if (btrfs_header_level(next) !=
level - 1)
BUG();
btrfs_print_tree(root, next);
btrfs_print_tree(fs_info, next);
free_extent_buffer(next);
}
}

View File

@ -18,6 +18,6 @@
#ifndef __PRINT_TREE_
#define __PRINT_TREE_
void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l);
void btrfs_print_tree(struct btrfs_root *root, struct extent_buffer *c);
void btrfs_print_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *l);
void btrfs_print_tree(struct btrfs_fs_info *fs_info, struct extent_buffer *c);
#endif

View File

@ -301,6 +301,7 @@ static int inherit_props(struct btrfs_trans_handle *trans,
struct inode *parent)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
int i;
@ -320,14 +321,14 @@ static int inherit_props(struct btrfs_trans_handle *trans,
if (!value)
continue;
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
ret = btrfs_block_rsv_add(root, trans->block_rsv,
num_bytes, BTRFS_RESERVE_NO_FLUSH);
if (ret)
goto out;
ret = __btrfs_set_prop(trans, inode, h->xattr_name,
value, strlen(value), 0);
btrfs_block_rsv_release(root, trans->block_rsv, num_bytes);
btrfs_block_rsv_release(fs_info, trans->block_rsv, num_bytes);
if (ret)
goto out;
}

View File

@ -131,8 +131,15 @@ struct btrfs_qgroup_list {
struct btrfs_qgroup *member;
};
#define ptr_to_u64(x) ((u64)(uintptr_t)x)
#define u64_to_ptr(x) ((struct btrfs_qgroup *)(uintptr_t)x)
static inline u64 qgroup_to_aux(struct btrfs_qgroup *qg)
{
return (u64)(uintptr_t)qg;
}
static inline struct btrfs_qgroup *unode_aux_to_qgroup(struct ulist_node *n)
{
return (struct btrfs_qgroup *)(uintptr_t)n->aux;
}
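/*
 * Editor's note (not part of the commit): unlike the removed
 * ptr_to_u64()/u64_to_ptr() macros, which cast any pointer without
 * complaint, the two helpers above pin the ulist aux round-trip to
 * struct btrfs_qgroup. A minimal sketch of the intended pairing:
 *
 *	ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
 *	...
 *	while ((unode = ulist_next(tmp, &uiter)))
 *		qg = unode_aux_to_qgroup(unode);	<- typed, no bare casts
 */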
static int
qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
@ -1012,7 +1019,7 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
list_del(&quota_root->dirty_list);
btrfs_tree_lock(quota_root->node);
clean_tree_block(trans, tree_root->fs_info, quota_root->node);
clean_tree_block(trans, fs_info, quota_root->node);
btrfs_tree_unlock(quota_root->node);
btrfs_free_tree_block(trans, quota_root, quota_root->node, 0, 1);
@ -1066,7 +1073,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
/* Get all of the parent groups that contain this qgroup */
list_for_each_entry(glist, &qgroup->groups, next_group) {
ret = ulist_add(tmp, glist->group->qgroupid,
ptr_to_u64(glist->group), GFP_ATOMIC);
qgroup_to_aux(glist->group), GFP_ATOMIC);
if (ret < 0)
goto out;
}
@ -1074,7 +1081,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
/* Iterate all of the parents and adjust their reference counts */
ULIST_ITER_INIT(&uiter);
while ((unode = ulist_next(tmp, &uiter))) {
qgroup = u64_to_ptr(unode->aux);
qgroup = unode_aux_to_qgroup(unode);
qgroup->rfer += sign * num_bytes;
qgroup->rfer_cmpr += sign * num_bytes;
WARN_ON(sign < 0 && qgroup->excl < num_bytes);
@ -1087,7 +1094,7 @@ static int __qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
/* Add any parents of the parents */
list_for_each_entry(glist, &qgroup->groups, next_group) {
ret = ulist_add(tmp, glist->group->qgroupid,
ptr_to_u64(glist->group), GFP_ATOMIC);
qgroup_to_aux(glist->group), GFP_ATOMIC);
if (ret < 0)
goto out;
}
@ -1185,7 +1192,7 @@ int btrfs_add_qgroup_relation(struct btrfs_trans_handle *trans,
}
spin_lock(&fs_info->qgroup_lock);
ret = add_relation_rb(quota_root->fs_info, src, dst);
ret = add_relation_rb(fs_info, src, dst);
if (ret < 0) {
spin_unlock(&fs_info->qgroup_lock);
goto out;
@ -1333,7 +1340,7 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans,
}
spin_lock(&fs_info->qgroup_lock);
del_qgroup_rb(quota_root->fs_info, qgroupid);
del_qgroup_rb(fs_info, qgroupid);
spin_unlock(&fs_info->qgroup_lock);
out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
@ -1450,7 +1457,7 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
return ret;
}
int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
int btrfs_qgroup_trace_extent_nolock(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record)
{
@ -1460,7 +1467,7 @@ int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
u64 bytenr = record->bytenr;
assert_spin_locked(&delayed_refs->lock);
trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);
trace_btrfs_qgroup_trace_extent(fs_info, record);
while (*p) {
parent_node = *p;
@ -1479,7 +1486,7 @@ int btrfs_qgroup_insert_dirty_extent_nolock(struct btrfs_fs_info *fs_info,
return 0;
}
int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
gfp_t gfp_flag)
{
@ -1502,14 +1509,228 @@ int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
record->old_roots = NULL;
spin_lock(&delayed_refs->lock);
ret = btrfs_qgroup_insert_dirty_extent_nolock(fs_info, delayed_refs,
record);
ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, record);
spin_unlock(&delayed_refs->lock);
if (ret > 0)
kfree(record);
return 0;
}
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct extent_buffer *eb)
{
int nr = btrfs_header_nritems(eb);
int i, extent_type, ret;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
u64 bytenr, num_bytes;
/* We can be called directly from walk_up_proc() */
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0;
for (i = 0; i < nr; i++) {
btrfs_item_key_to_cpu(eb, &key, i);
if (key.type != BTRFS_EXTENT_DATA_KEY)
continue;
fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
/* filter out non qgroup-accountable extents */
extent_type = btrfs_file_extent_type(eb, fi);
if (extent_type == BTRFS_FILE_EXTENT_INLINE)
continue;
bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
if (!bytenr)
continue;
num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
ret = btrfs_qgroup_trace_extent(trans, fs_info, bytenr,
num_bytes, GFP_NOFS);
if (ret)
return ret;
}
return 0;
}
/*
* Walk up the tree from the bottom, freeing leaves and any interior
* nodes which have had all slots visited. If a node (leaf or
* interior) is freed, the node above it will have its slot
* incremented. The root node will never be freed.
*
* At the end of this function, we should have a path which has all
* slots incremented to the next position for a search. If we need to
* read a new node it will be NULL and the node above it will have the
* correct slot selected for a later read.
*
* If we increment the root nodes slot counter past the number of
* elements, 1 is returned to signal completion of the search.
*/
static int adjust_slots_upwards(struct btrfs_root *root,
struct btrfs_path *path, int root_level)
{
int level = 0;
int nr, slot;
struct extent_buffer *eb;
if (root_level == 0)
return 1;
while (level <= root_level) {
eb = path->nodes[level];
nr = btrfs_header_nritems(eb);
path->slots[level]++;
slot = path->slots[level];
if (slot >= nr || level == 0) {
/*
* Don't free the root - we will detect this
* condition after our loop and return a
* positive value for caller to stop walking the tree.
*/
if (level != root_level) {
btrfs_tree_unlock_rw(eb, path->locks[level]);
path->locks[level] = 0;
free_extent_buffer(eb);
path->nodes[level] = NULL;
path->slots[level] = 0;
}
} else {
/*
* We have a valid slot to walk back down
* from. Stop here so caller can process these
* new nodes.
*/
break;
}
level++;
}
eb = path->nodes[root_level];
if (path->slots[root_level] >= btrfs_header_nritems(eb))
return 1;
return 0;
}
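/*
 * Editor's sketch (not part of the commit): one pass of the walk above
 * on a hypothetical two-level tree whose root holds 3 pointers, right
 * after the leaf under root slot 0 has been processed:
 *
 *	level 0: slots[0]++, level == 0 -> unlock and free the leaf,
 *	         nodes[0] = NULL
 *	level 1: slots[1]++ -> slot 1 < 3, valid slot, stop walking up
 *
 * The caller jumps back to its walk_down label and reads the child at
 * root slot 1; only when slots[root_level] runs past nritems does the
 * function return 1 to end the search.
 */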
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *root_eb,
u64 root_gen, int root_level)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
int level;
struct extent_buffer *eb = root_eb;
struct btrfs_path *path = NULL;
BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
BUG_ON(root_eb == NULL);
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
return 0;
if (!extent_buffer_uptodate(root_eb)) {
ret = btrfs_read_buffer(root_eb, root_gen);
if (ret)
goto out;
}
if (root_level == 0) {
ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, root_eb);
goto out;
}
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
/*
* Walk down the tree. Missing extent blocks are filled in as
* we go. Metadata is accounted every time we read a new
* extent block.
*
* When we reach a leaf, we account for file extent items in it,
* walk back up the tree (adjusting slot pointers as we go)
* and restart the search process.
*/
extent_buffer_get(root_eb); /* For path */
path->nodes[root_level] = root_eb;
path->slots[root_level] = 0;
path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
walk_down:
level = root_level;
while (level >= 0) {
if (path->nodes[level] == NULL) {
int parent_slot;
u64 child_gen;
u64 child_bytenr;
/*
* We need to get child blockptr/gen from parent before
* we can read it.
*/
eb = path->nodes[level + 1];
parent_slot = path->slots[level + 1];
child_bytenr = btrfs_node_blockptr(eb, parent_slot);
child_gen = btrfs_node_ptr_generation(eb, parent_slot);
eb = read_tree_block(fs_info, child_bytenr, child_gen);
if (IS_ERR(eb)) {
ret = PTR_ERR(eb);
goto out;
} else if (!extent_buffer_uptodate(eb)) {
free_extent_buffer(eb);
ret = -EIO;
goto out;
}
path->nodes[level] = eb;
path->slots[level] = 0;
btrfs_tree_read_lock(eb);
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
ret = btrfs_qgroup_trace_extent(trans, fs_info,
child_bytenr,
fs_info->nodesize,
GFP_NOFS);
if (ret)
goto out;
}
if (level == 0) {
ret = btrfs_qgroup_trace_leaf_items(trans, fs_info,
path->nodes[level]);
if (ret)
goto out;
/* Nonzero return here means we completed our search */
ret = adjust_slots_upwards(root, path, root_level);
if (ret)
break;
/* Restart search with new slots */
goto walk_down;
}
level--;
}
ret = 0;
out:
btrfs_free_path(path);
return ret;
}
#define UPDATE_NEW 0
#define UPDATE_OLD 1
/*
@ -1535,30 +1756,30 @@ static int qgroup_update_refcnt(struct btrfs_fs_info *fs_info,
continue;
ulist_reinit(tmp);
ret = ulist_add(qgroups, qg->qgroupid, ptr_to_u64(qg),
ret = ulist_add(qgroups, qg->qgroupid, qgroup_to_aux(qg),
GFP_ATOMIC);
if (ret < 0)
return ret;
ret = ulist_add(tmp, qg->qgroupid, ptr_to_u64(qg), GFP_ATOMIC);
ret = ulist_add(tmp, qg->qgroupid, qgroup_to_aux(qg), GFP_ATOMIC);
if (ret < 0)
return ret;
ULIST_ITER_INIT(&tmp_uiter);
while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
struct btrfs_qgroup_list *glist;
qg = u64_to_ptr(tmp_unode->aux);
qg = unode_aux_to_qgroup(tmp_unode);
if (update_old)
btrfs_qgroup_update_old_refcnt(qg, seq, 1);
else
btrfs_qgroup_update_new_refcnt(qg, seq, 1);
list_for_each_entry(glist, &qg->groups, next_group) {
ret = ulist_add(qgroups, glist->group->qgroupid,
ptr_to_u64(glist->group),
qgroup_to_aux(glist->group),
GFP_ATOMIC);
if (ret < 0)
return ret;
ret = ulist_add(tmp, glist->group->qgroupid,
ptr_to_u64(glist->group),
qgroup_to_aux(glist->group),
GFP_ATOMIC);
if (ret < 0)
return ret;
@ -1619,7 +1840,7 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
while ((unode = ulist_next(qgroups, &uiter))) {
bool dirty = false;
qg = u64_to_ptr(unode->aux);
qg = unode_aux_to_qgroup(unode);
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
@ -1950,7 +2171,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
}
rcu_read_lock();
level_size = srcroot->nodesize;
level_size = fs_info->nodesize;
rcu_read_unlock();
}
@ -2034,8 +2255,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans,
i_qgroups = (u64 *)(inherit + 1);
for (i = 0; i < inherit->num_qgroups; ++i) {
if (*i_qgroups) {
ret = add_relation_rb(quota_root->fs_info, objectid,
*i_qgroups);
ret = add_relation_rb(fs_info, objectid, *i_qgroups);
if (ret)
goto unlock;
}
@ -2125,7 +2345,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
qg = u64_to_ptr(unode->aux);
qg = unode_aux_to_qgroup(unode);
if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
qg->reserved + (s64)qg->rfer + num_bytes >
@ -2157,7 +2377,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
while ((unode = ulist_next(fs_info->qgroup_ulist, &uiter))) {
struct btrfs_qgroup *qg;
qg = u64_to_ptr(unode->aux);
qg = unode_aux_to_qgroup(unode);
qg->reserved += num_bytes;
}
@ -2202,7 +2422,7 @@ void btrfs_qgroup_free_refroot(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup *qg;
struct btrfs_qgroup_list *glist;
qg = u64_to_ptr(unode->aux);
qg = unode_aux_to_qgroup(unode);
qg->reserved -= num_bytes;
@ -2302,7 +2522,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
found.type != BTRFS_METADATA_ITEM_KEY)
continue;
if (found.type == BTRFS_METADATA_ITEM_KEY)
num_bytes = fs_info->extent_root->nodesize;
num_bytes = fs_info->nodesize;
else
num_bytes = found.offset;
@ -2335,10 +2555,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
int err = -ENOMEM;
int ret = 0;
mutex_lock(&fs_info->qgroup_rescan_lock);
fs_info->qgroup_rescan_running = true;
mutex_unlock(&fs_info->qgroup_rescan_lock);
path = btrfs_alloc_path();
if (!path)
goto out;
@ -2356,9 +2572,9 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
err = qgroup_rescan_leaf(fs_info, path, trans);
}
if (err > 0)
btrfs_commit_transaction(trans, fs_info->fs_root);
btrfs_commit_transaction(trans);
else
btrfs_end_transaction(trans, fs_info->fs_root);
btrfs_end_transaction(trans);
}
out:
@ -2393,7 +2609,7 @@ out:
err = ret;
btrfs_err(fs_info, "fail to update qgroup status: %d", err);
}
btrfs_end_transaction(trans, fs_info->quota_root);
btrfs_end_transaction(trans);
if (btrfs_fs_closing(fs_info)) {
btrfs_info(fs_info, "qgroup scan paused");
@ -2449,6 +2665,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
sizeof(fs_info->qgroup_rescan_progress));
fs_info->qgroup_rescan_progress.objectid = progress_objectid;
init_completion(&fs_info->qgroup_rescan_completion);
fs_info->qgroup_rescan_running = true;
spin_unlock(&fs_info->qgroup_lock);
mutex_unlock(&fs_info->qgroup_rescan_lock);
@ -2512,7 +2729,7 @@ btrfs_qgroup_rescan(struct btrfs_fs_info *fs_info)
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
return PTR_ERR(trans);
}
ret = btrfs_commit_transaction(trans, fs_info->fs_root);
ret = btrfs_commit_transaction(trans);
if (ret) {
fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
return ret;
@ -2677,13 +2894,14 @@ int btrfs_qgroup_release_data(struct inode *inode, u64 start, u64 len)
int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid) || num_bytes == 0)
return 0;
BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
ret = qgroup_reserve(root, num_bytes);
if (ret < 0)
return ret;
@ -2693,9 +2911,10 @@ int btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes)
void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int reserved;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid))
return;
@ -2707,11 +2926,13 @@ void btrfs_qgroup_free_meta_all(struct btrfs_root *root)
void btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes)
{
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &root->fs_info->flags) ||
struct btrfs_fs_info *fs_info = root->fs_info;
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) ||
!is_fstree(root->objectid))
return;
BUG_ON(num_bytes != round_down(num_bytes, root->nodesize));
BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
WARN_ON(atomic_read(&root->qgroup_meta_rsv) < num_bytes);
atomic_sub(num_bytes, &root->qgroup_meta_rsv);
qgroup_free(root, num_bytes);

View File

@ -22,6 +22,34 @@
#include "ulist.h"
#include "delayed-ref.h"
/*
* Btrfs qgroup overview
*
* Btrfs qgroup splits into 3 main parts:
* 1) Reserve
* Reserve metadata/data space for incoming operations
* Affects how the qgroup limit works
*
* 2) Trace
* Tell btrfs qgroup to trace dirty extents.
*
* Dirty extents include:
* - Newly allocated extents
* - Extents going to be deleted (in this trans)
* - Extents whose owner is going to be modified
*
* This is the main part that affects whether qgroup numbers will stay
* consistent.
* Btrfs qgroup can trace clean extents and won't cause any problem,
* but it will consume extra CPU time, so it should be avoided if possible.
*
* 3) Account
* Btrfs qgroup will update its numbers based on dirty extents traced
* in the previous step.
*
* Normally at qgroup rescan and transaction commit time.
*/
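/*
 * Editor's sketch (not part of the commit): the three phases strung
 * together for one hypothetical metadata allocation; names match the
 * declarations in this header, error handling is elided.
 *
 *	ret = btrfs_qgroup_reserve_meta(root, fs_info->nodesize);  step 1
 *	...
 *	ret = btrfs_qgroup_trace_extent(trans, fs_info, eb->start,
 *					fs_info->nodesize, GFP_NOFS);  step 2
 *	...
 *	step 3, accounting, then happens at transaction commit (or rescan)
 *	through btrfs_qgroup_account_extent(), with no extra work here.
 */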
/*
* Record a dirty extent, and inform qgroup to update quota on it
* TODO: Use kmem cache to alloc it.
@ -65,8 +93,8 @@ struct btrfs_delayed_extent_op;
int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
/*
* Insert one dirty extent record into @delayed_refs, informing qgroup to
* account that extent at commit trans time.
* Inform qgroup to trace one dirty extent; its info is recorded in @record,
* so qgroup can account it at commit trans time.
*
* No lock version, caller must acquire delayed ref lock and allocate memory.
*
@ -74,14 +102,15 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
* Return >0 for existing record, caller can free @record safely.
* Error is not possible
*/
int btrfs_qgroup_insert_dirty_extent_nolock(
int btrfs_qgroup_trace_extent_nolock(
struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
struct btrfs_qgroup_extent_record *record);
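/*
 * Editor's sketch (not part of the commit): the calling convention the
 * comment above spells out, mirroring btrfs_qgroup_trace_extent() in
 * qgroup.c: allocate outside the lock, insert under it, and free the
 * record when it turns out to exist already:
 *
 *	record = kmalloc(sizeof(*record), gfp_flag);
 *	...fill in record->bytenr / num_bytes / old_roots...
 *	spin_lock(&delayed_refs->lock);
 *	ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs,
 *					       record);
 *	spin_unlock(&delayed_refs->lock);
 *	if (ret > 0)
 *		kfree(record);
 */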
/*
* Insert one dirty extent record into @delayed_refs, informing qgroup to
* account that extent at commit trans time.
* Inform qgroup to trace one dirty extent, specified by @bytenr and
* @num_bytes, so qgroup can account it at commit trans time.
*
* Better encapsulated version.
*
@ -89,10 +118,33 @@ int btrfs_qgroup_insert_dirty_extent_nolock(
* Return <0 for error, like memory allocation failure or invalid parameter
* (NULL trans)
*/
int btrfs_qgroup_insert_dirty_extent(struct btrfs_trans_handle *trans,
int btrfs_qgroup_trace_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info, u64 bytenr, u64 num_bytes,
gfp_t gfp_flag);
/*
* Inform qgroup to trace all the data extent items in a leaf
*
* Return 0 for success
* Return <0 for error (ENOMEM)
*/
int btrfs_qgroup_trace_leaf_items(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
struct extent_buffer *eb);
/*
* Inform qgroup to trace a whole subtree, including all its child tree
* blocks and data.
* The root tree block is specified by @root_eb.
*
* Normally used by relocation (tree block swap) and subvolume deletion.
*
* Return 0 for success
* Return <0 for error (ENOMEM or tree search error)
*/
int btrfs_qgroup_trace_subtree(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *root_eb,
u64 root_gen, int root_level);
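/*
 * Editor's sketch (not part of the commit): a hypothetical call site of
 * the kind described above, handing over an entire subtree rooted at eb:
 *
 *	ret = btrfs_qgroup_trace_subtree(trans, root, eb,
 *					 btrfs_header_generation(eb),
 *					 btrfs_header_level(eb));
 *	if (ret < 0)
 *		goto out;	ENOMEM or a tree-search failure
 */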
int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,

View File

@ -969,8 +969,9 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
* allocation and initial setup for the btrfs_raid_bio. Note that
* this does not allocate any pages for rbio->pages.
*/
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
struct btrfs_bio *bbio, u64 stripe_len)
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
struct btrfs_bio *bbio,
u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
int nr_data = 0;
@ -991,7 +992,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
INIT_LIST_HEAD(&rbio->stripe_cache);
INIT_LIST_HEAD(&rbio->hash_list);
rbio->bbio = bbio;
rbio->fs_info = root->fs_info;
rbio->fs_info = fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
rbio->real_stripes = real_stripes;
@ -1144,10 +1145,10 @@ static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
struct bio *bio;
struct bio_vec *bvec;
u64 start;
unsigned long stripe_offset;
unsigned long page_index;
struct page *p;
int i;
spin_lock_irq(&rbio->bio_list_lock);
@ -1156,10 +1157,8 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
stripe_offset = start - rbio->bbio->raid_map[0];
page_index = stripe_offset >> PAGE_SHIFT;
for (i = 0; i < bio->bi_vcnt; i++) {
p = bio->bi_io_vec[i].bv_page;
rbio->bio_pages[page_index + i] = p;
}
bio_for_each_segment_all(bvec, bio, i)
rbio->bio_pages[page_index + i] = bvec->bv_page;
}
spin_unlock_irq(&rbio->bio_list_lock);
}
@ -1433,13 +1432,11 @@ static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
*/
static void set_bio_pages_uptodate(struct bio *bio)
{
struct bio_vec *bvec;
int i;
struct page *p;
for (i = 0; i < bio->bi_vcnt; i++) {
p = bio->bi_io_vec[i].bv_page;
SetPageUptodate(p);
}
bio_for_each_segment_all(bvec, bio, i)
SetPageUptodate(bvec->bv_page);
}
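/*
 * Editor's note (not part of the commit): bio_for_each_segment_all() is
 * the block layer's own iterator over a bio's pages; open-coding the
 * bi_vcnt/bi_io_vec walk, as the removed lines did, depends on bio
 * internals that the helper hides. The pattern, in a minimal sketch:
 *
 *	struct bio_vec *bvec;
 *	int i;
 *
 *	bio_for_each_segment_all(bvec, bio, i)
 *		do_something(bvec->bv_page);
 */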
/*
@ -1482,11 +1479,8 @@ cleanup:
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
btrfs_init_work(&rbio->work, btrfs_rmw_helper,
rmw_work, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers,
&rbio->work);
btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
static void async_read_rebuild(struct btrfs_raid_bio *rbio)
@ -1494,8 +1488,7 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
btrfs_init_work(&rbio->work, btrfs_rmw_helper,
read_rebuild_work, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers,
&rbio->work);
btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
/*
@ -1577,8 +1570,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid_rmw_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
@ -1743,7 +1735,7 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
/*
* our main entry point for writes from the rest of the FS.
*/
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len)
{
struct btrfs_raid_bio *rbio;
@ -1751,7 +1743,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
struct blk_plug_cb *cb;
int ret;
rbio = alloc_rbio(root, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
btrfs_put_bbio(bbio);
return PTR_ERR(rbio);
@ -1760,7 +1752,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
rbio->bio_list_bytes = bio->bi_iter.bi_size;
rbio->operation = BTRFS_RBIO_WRITE;
btrfs_bio_counter_inc_noblocked(root->fs_info);
btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1;
/*
@ -1770,16 +1762,15 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
if (rbio_is_full(rbio)) {
ret = full_stripe_write(rbio);
if (ret)
btrfs_bio_counter_dec(root->fs_info);
btrfs_bio_counter_dec(fs_info);
return ret;
}
cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
sizeof(*plug));
cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
if (cb) {
plug = container_of(cb, struct btrfs_plug_cb, cb);
if (!plug->info) {
plug->info = root->fs_info;
plug->info = fs_info;
INIT_LIST_HEAD(&plug->rbio_list);
}
list_add_tail(&rbio->plug_list, &plug->rbio_list);
@ -1787,7 +1778,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
} else {
ret = __raid56_parity_write(rbio);
if (ret)
btrfs_bio_counter_dec(root->fs_info);
btrfs_bio_counter_dec(fs_info);
}
return ret;
}
@ -2102,8 +2093,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid_recover_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
@ -2123,14 +2113,14 @@ cleanup:
* so we assume the bio they send down corresponds to a failed part
* of the drive.
*/
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io)
{
struct btrfs_raid_bio *rbio;
int ret;
rbio = alloc_rbio(root, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio)) {
if (generic_io)
btrfs_put_bbio(bbio);
@ -2143,7 +2133,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
rbio->faila = find_logical_bio_stripe(rbio, bio);
if (rbio->faila == -1) {
btrfs_warn(root->fs_info,
btrfs_warn(fs_info,
"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
__func__, (u64)bio->bi_iter.bi_sector << 9,
(u64)bio->bi_iter.bi_size, bbio->map_type);
@ -2154,7 +2144,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
}
if (generic_io) {
btrfs_bio_counter_inc_noblocked(root->fs_info);
btrfs_bio_counter_inc_noblocked(fs_info);
rbio->generic_bio_cnt = 1;
} else {
btrfs_get_bbio(bbio);
@ -2212,7 +2202,7 @@ static void read_rebuild_work(struct btrfs_work *work)
*/
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors)
@ -2220,7 +2210,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
struct btrfs_raid_bio *rbio;
int i;
rbio = alloc_rbio(root, bbio, stripe_len);
rbio = alloc_rbio(fs_info, bbio, stripe_len);
if (IS_ERR(rbio))
return NULL;
bio_list_add(&rbio->bio_list, bio);
@ -2239,7 +2229,7 @@ raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
}
/* For now we only support sectorsize equal to page size */
ASSERT(root->sectorsize == PAGE_SIZE);
ASSERT(fs_info->sectorsize == PAGE_SIZE);
ASSERT(rbio->stripe_npages == stripe_nsectors);
bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
@ -2621,8 +2611,7 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
bio->bi_end_io = raid56_parity_scrub_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
submit_bio(bio);
}
@ -2650,8 +2639,7 @@ static void async_scrub_parity(struct btrfs_raid_bio *rbio)
btrfs_init_work(&rbio->work, btrfs_rmw_helper,
scrub_parity_work, NULL, NULL);
btrfs_queue_work(rbio->fs_info->rmw_workers,
&rbio->work);
btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
@ -2663,12 +2651,12 @@ void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
/* The following code is used for dev replace of a missing RAID 5/6 device. */
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length)
{
struct btrfs_raid_bio *rbio;
rbio = alloc_rbio(root, bbio, length);
rbio = alloc_rbio(fs_info, bbio, length);
if (IS_ERR(rbio))
return NULL;

View File

@ -42,24 +42,24 @@ static inline int nr_data_stripes(struct map_lookup *map)
struct btrfs_raid_bio;
struct btrfs_device;
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
int mirror_num, int generic_io);
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len);
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
u64 logical);
struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 stripe_len,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap, int stripe_nsectors);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
struct btrfs_bio *bbio, u64 length);
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio);

View File

@ -107,18 +107,14 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
struct reada_extent *re, struct extent_buffer *eb,
u64 start, int err)
int err)
{
int level = 0;
int nritems;
int i;
u64 bytenr;
u64 generation;
struct list_head list;
if (eb)
level = btrfs_header_level(eb);
spin_lock(&re->lock);
/*
* just take the full list from the extent. afterwards we
@ -143,7 +139,7 @@ static void __readahead_hook(struct btrfs_fs_info *fs_info,
* trigger more readahead depending on the content, e.g.
* fetch the checksums for the extents in the leaf.
*/
if (!level)
if (!btrfs_header_level(eb))
goto cleanup;
nritems = btrfs_header_nritems(eb);
@ -213,12 +209,8 @@ cleanup:
return;
}
/*
* start is passed separately in case eb is NULL, which may be the case with
* failed I/O
*/
int btree_readahead_hook(struct btrfs_fs_info *fs_info,
struct extent_buffer *eb, u64 start, int err)
struct extent_buffer *eb, int err)
{
int ret = 0;
struct reada_extent *re;
@ -226,7 +218,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
/* find extent */
spin_lock(&fs_info->reada_lock);
re = radix_tree_lookup(&fs_info->reada_tree,
start >> PAGE_SHIFT);
eb->start >> PAGE_SHIFT);
if (re)
re->refcnt++;
spin_unlock(&fs_info->reada_lock);
@ -235,7 +227,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info,
goto start_machine;
}
__readahead_hook(fs_info, re, eb, start, err);
__readahead_hook(fs_info, re, eb, err);
reada_extent_put(fs_info, re); /* our ref */
start_machine:
@ -311,14 +303,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
return zone;
}
static struct reada_extent *reada_find_extent(struct btrfs_root *root,
static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
u64 logical,
struct btrfs_key *top)
{
int ret;
struct reada_extent *re = NULL;
struct reada_extent *re_exist = NULL;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_bio *bbio = NULL;
struct btrfs_device *dev;
struct btrfs_device *prev_dev;
@ -343,7 +334,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
if (!re)
return NULL;
blocksize = root->nodesize;
blocksize = fs_info->nodesize;
re->logical = logical;
re->top = *top;
INIT_LIST_HEAD(&re->extctl);
@ -354,13 +345,13 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
* map block
*/
length = blocksize;
ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
&bbio, 0);
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
&length, &bbio, 0);
if (ret || !bbio || length < blocksize)
goto error;
if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
btrfs_err(root->fs_info,
btrfs_err(fs_info,
"readahead: more than %d copies not supported",
BTRFS_MAX_MIRRORS);
goto error;
@ -401,7 +392,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
ret = radix_tree_insert(&fs_info->reada_tree, index, re);
if (ret == -EEXIST) {
re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
BUG_ON(!re_exist);
re_exist->refcnt++;
spin_unlock(&fs_info->reada_lock);
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@ -448,7 +438,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
/* ignore whether the entry was inserted */
radix_tree_delete(&dev->reada_extents, index);
}
BUG_ON(fs_info == NULL);
radix_tree_delete(&fs_info->reada_tree, index);
spin_unlock(&fs_info->reada_lock);
btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
@ -554,17 +543,18 @@ static void reada_control_release(struct kref *kref)
static int reada_add_block(struct reada_control *rc, u64 logical,
struct btrfs_key *top, u64 generation)
{
struct btrfs_root *root = rc->root;
struct btrfs_fs_info *fs_info = rc->fs_info;
struct reada_extent *re;
struct reada_extctl *rec;
re = reada_find_extent(root, logical, top); /* takes one ref */
/* takes one ref */
re = reada_find_extent(fs_info, logical, top);
if (!re)
return -1;
rec = kzalloc(sizeof(*rec), GFP_KERNEL);
if (!rec) {
reada_extent_put(root->fs_info, re);
reada_extent_put(fs_info, re);
return -ENOMEM;
}
@ -688,7 +678,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
spin_unlock(&fs_info->reada_lock);
return 0;
}
dev->reada_next = re->logical + fs_info->tree_root->nodesize;
dev->reada_next = re->logical + fs_info->nodesize;
re->refcnt++;
spin_unlock(&fs_info->reada_lock);
@ -714,12 +704,11 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
logical = re->logical;
atomic_inc(&dev->reada_in_flight);
ret = reada_tree_block_flagged(fs_info->extent_root, logical,
mirror_num, &eb);
ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
if (ret)
__readahead_hook(fs_info, re, NULL, logical, ret);
__readahead_hook(fs_info, re, NULL, ret);
else if (eb)
__readahead_hook(fs_info, re, eb, eb->start, ret);
__readahead_hook(fs_info, re, eb, ret);
if (eb)
free_extent_buffer(eb);
@ -852,7 +841,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
if (ret == 0)
break;
pr_debug(" re: logical %llu size %u empty %d scheduled %d",
re->logical, fs_info->tree_root->nodesize,
re->logical, fs_info->nodesize,
list_empty(&re->extctl), re->scheduled);
for (i = 0; i < re->nzones; ++i) {
@ -885,7 +874,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
continue;
}
pr_debug("re: logical %llu size %u list empty %d scheduled %d",
re->logical, fs_info->tree_root->nodesize,
re->logical, fs_info->nodesize,
list_empty(&re->extctl), re->scheduled);
for (i = 0; i < re->nzones; ++i) {
pr_cont(" zone %llu-%llu devs",
@ -924,7 +913,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
if (!rc)
return ERR_PTR(-ENOMEM);
rc->root = root;
rc->fs_info = root->fs_info;
rc->key_start = *key_start;
rc->key_end = *key_end;
atomic_set(&rc->elems, 0);
@ -952,18 +941,17 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
int btrfs_reada_wait(void *handle)
{
struct reada_control *rc = handle;
struct btrfs_fs_info *fs_info = rc->root->fs_info;
struct btrfs_fs_info *fs_info = rc->fs_info;
while (atomic_read(&rc->elems)) {
if (!atomic_read(&fs_info->reada_works_cnt))
reada_start_machine(fs_info);
wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
5 * HZ);
dump_devs(rc->root->fs_info,
atomic_read(&rc->elems) < 10 ? 1 : 0);
dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
}
dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
kref_put(&rc->refcnt, reada_control_release);
@ -973,7 +961,7 @@ int btrfs_reada_wait(void *handle)
int btrfs_reada_wait(void *handle)
{
struct reada_control *rc = handle;
struct btrfs_fs_info *fs_info = rc->root->fs_info;
struct btrfs_fs_info *fs_info = rc->fs_info;
while (atomic_read(&rc->elems)) {
if (!atomic_read(&fs_info->reada_works_cnt))

File diff suppressed because it is too large. Load Diff

View File

@ -132,6 +132,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, struct btrfs_root_item
*item)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct extent_buffer *l;
int ret;
@ -150,9 +151,8 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
}
if (ret != 0) {
btrfs_print_leaf(root, path->nodes[0]);
btrfs_crit(root->fs_info,
"unable to update root key %llu %u %llu",
btrfs_print_leaf(fs_info, path->nodes[0]);
btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
key->objectid, key->type, key->offset);
BUG_ON(1);
}
@ -216,8 +216,9 @@ int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return btrfs_insert_item(trans, root, key, item, sizeof(*item));
}
int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
{
struct btrfs_root *tree_root = fs_info->tree_root;
struct extent_buffer *leaf;
struct btrfs_path *path;
struct btrfs_key key;
@ -227,7 +228,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
int ret;
bool can_recover = true;
if (tree_root->fs_info->sb->s_flags & MS_RDONLY)
if (fs_info->sb->s_flags & MS_RDONLY)
can_recover = false;
path = btrfs_alloc_path();
@ -275,8 +276,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
* in turn reads and inserts fs roots while doing backref
* walking.
*/
root = btrfs_lookup_fs_root(tree_root->fs_info,
root_key.objectid);
root = btrfs_lookup_fs_root(fs_info, root_key.objectid);
if (root) {
WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
&root->state));
@ -297,15 +297,15 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
trans = btrfs_join_transaction(tree_root);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
btrfs_handle_fs_error(tree_root->fs_info, err,
btrfs_handle_fs_error(fs_info, err,
"Failed to start trans to delete orphan item");
break;
}
err = btrfs_del_orphan_item(trans, tree_root,
root_key.objectid);
btrfs_end_transaction(trans, tree_root);
btrfs_end_transaction(trans);
if (err) {
btrfs_handle_fs_error(tree_root->fs_info, err,
btrfs_handle_fs_error(fs_info, err,
"Failed to delete root orphan item");
break;
}
@ -320,7 +320,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
err = btrfs_insert_fs_root(root->fs_info, root);
err = btrfs_insert_fs_root(fs_info, root);
if (err) {
BUG_ON(err == -EEXIST);
btrfs_free_fs_root(root);
@ -358,11 +358,12 @@ out:
}
int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
struct btrfs_fs_info *fs_info,
u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
const char *name, int name_len)
{
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_path *path;
struct btrfs_root_ref *ref;
struct extent_buffer *leaf;
@ -429,10 +430,11 @@ out:
* Will return 0, -ENOMEM, or anything from the CoW path
*/
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
struct btrfs_fs_info *fs_info,
u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
const char *name, int name_len)
{
struct btrfs_root *tree_root = fs_info->tree_root;
struct btrfs_key key;
int ret;
struct btrfs_path *path;

View file

@ -171,7 +171,7 @@ struct scrub_wr_ctx {
struct scrub_ctx {
struct scrub_bio *bios[SCRUB_BIOS_PER_SCTX];
struct btrfs_root *dev_root;
struct btrfs_fs_info *fs_info;
int first_free;
int curr;
atomic_t bios_in_flight;
@ -356,7 +356,7 @@ static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
*/
static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
{
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
atomic_inc(&sctx->refs);
/*
@ -388,7 +388,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
/* used for workers that require transaction commits */
static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
{
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
/*
* see scrub_pending_trans_workers_inc() why we're pretending
@ -458,7 +458,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
{
struct scrub_ctx *sctx;
int i;
struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
struct btrfs_fs_info *fs_info = dev->fs_info;
int ret;
sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
@ -468,7 +468,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
sctx->is_dev_replace = is_dev_replace;
sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
sctx->curr = -1;
sctx->dev_root = dev->dev_root;
sctx->fs_info = dev->fs_info;
for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
struct scrub_bio *sbio;
@ -489,8 +489,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
sctx->bios[i]->next_free = -1;
}
sctx->first_free = 0;
sctx->nodesize = dev->dev_root->nodesize;
sctx->sectorsize = dev->dev_root->sectorsize;
sctx->nodesize = fs_info->nodesize;
sctx->sectorsize = fs_info->sectorsize;
atomic_set(&sctx->bios_in_flight, 0);
atomic_set(&sctx->workers_pending, 0);
atomic_set(&sctx->cancel_req, 0);
@ -524,7 +524,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
struct extent_buffer *eb;
struct btrfs_inode_item *inode_item;
struct scrub_warning *swarn = warn_ctx;
struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
struct inode_fs_paths *ipath = NULL;
struct btrfs_root *local_root;
struct btrfs_key root_key;
@ -618,7 +618,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
WARN_ON(sblock->page_count < 1);
dev = sblock->pagev[0]->dev;
fs_info = sblock->sctx->dev_root->fs_info;
fs_info = sblock->sctx->fs_info;
path = btrfs_alloc_path();
if (!path)
@ -789,6 +789,7 @@ out:
static void scrub_fixup_nodatasum(struct btrfs_work *work)
{
struct btrfs_fs_info *fs_info;
int ret;
struct scrub_fixup_nodatasum *fixup;
struct scrub_ctx *sctx;
@ -798,6 +799,7 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
fixup = container_of(work, struct scrub_fixup_nodatasum, work);
sctx = fixup->sctx;
fs_info = fixup->root->fs_info;
path = btrfs_alloc_path();
if (!path) {
@ -823,9 +825,8 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
* (once it's finished) and rewrite the failed sector if a good copy
* can be found.
*/
ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
path, scrub_fixup_readpage,
fixup);
ret = iterate_inodes_from_logical(fixup->logical, fs_info, path,
scrub_fixup_readpage, fixup);
if (ret < 0) {
uncorrectable = 1;
goto out;
@ -838,15 +839,14 @@ static void scrub_fixup_nodatasum(struct btrfs_work *work)
out:
if (trans && !IS_ERR(trans))
btrfs_end_transaction(trans, fixup->root);
btrfs_end_transaction(trans);
if (uncorrectable) {
spin_lock(&sctx->stat_lock);
++sctx->stat.uncorrectable_errors;
spin_unlock(&sctx->stat_lock);
btrfs_dev_replace_stats_inc(
&sctx->dev_root->fs_info->dev_replace.
num_uncorrectable_read_errors);
btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
&fs_info->dev_replace.num_uncorrectable_read_errors);
btrfs_err_rl_in_rcu(fs_info,
"unable to fixup (nodatasum) error at logical %llu on dev %s",
fixup->logical, rcu_str_deref(fixup->dev->name));
}
@ -898,7 +898,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
DEFAULT_RATELIMIT_BURST);
BUG_ON(sblock_to_check->page_count < 1);
fs_info = sctx->dev_root->fs_info;
fs_info = sctx->fs_info;
if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
/*
* if we find an error in a super block, we just report it.
@ -1177,9 +1177,7 @@ nodatasum_case:
if (scrub_write_page_to_dev_replace(sblock_other,
page_num) != 0) {
btrfs_dev_replace_stats_inc(
&sctx->dev_root->
fs_info->dev_replace.
num_write_errors);
&fs_info->dev_replace.num_write_errors);
success = 0;
}
} else if (sblock_other) {
@ -1302,7 +1300,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
struct scrub_block *sblocks_for_recheck)
{
struct scrub_ctx *sctx = original_sblock->sctx;
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
u64 length = original_sblock->page_count * PAGE_SIZE;
u64 logical = original_sblock->pagev[0]->logical;
u64 generation = original_sblock->pagev[0]->generation;
@ -1334,8 +1332,8 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
* with a length of PAGE_SIZE, each returned stripe
* represents one mirror
*/
ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
&mapped_length, &bbio, 0, 1);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
logical, &mapped_length, &bbio, 0, 1);
if (ret || !bbio || mapped_length < sublen) {
btrfs_put_bbio(bbio);
return -EIO;
@ -1452,7 +1450,7 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
bio->bi_private = &done;
bio->bi_end_io = scrub_bio_wait_endio;
ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
ret = raid56_parity_recover(fs_info, bio, page->recover->bbio,
page->recover->map_length,
page->mirror_num, 0);
if (ret)
@ -1565,6 +1563,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
{
struct scrub_page *page_bad = sblock_bad->pagev[page_num];
struct scrub_page *page_good = sblock_good->pagev[page_num];
struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
BUG_ON(page_bad->page == NULL);
BUG_ON(page_good->page == NULL);
@ -1574,7 +1573,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
int ret;
if (!page_bad->dev->bdev) {
btrfs_warn_rl(sblock_bad->sctx->dev_root->fs_info,
btrfs_warn_rl(fs_info,
"scrub_repair_page_from_good_copy(bdev == NULL) is unexpected");
return -EIO;
}
@ -1596,8 +1595,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
btrfs_dev_replace_stats_inc(
&sblock_bad->sctx->dev_root->fs_info->
dev_replace.num_write_errors);
&fs_info->dev_replace.num_write_errors);
bio_put(bio);
return -EIO;
}
@ -1609,6 +1607,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
{
struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
int page_num;
/*
@ -1624,8 +1623,7 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
ret = scrub_write_page_to_dev_replace(sblock, page_num);
if (ret)
btrfs_dev_replace_stats_inc(
&sblock->sctx->dev_root->fs_info->dev_replace.
num_write_errors);
&fs_info->dev_replace.num_write_errors);
}
}
@ -1740,7 +1738,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
static void scrub_wr_bio_end_io(struct bio *bio)
{
struct scrub_bio *sbio = bio->bi_private;
struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
sbio->err = bio->bi_error;
sbio->bio = bio;
@ -1759,7 +1757,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
if (sbio->err) {
struct btrfs_dev_replace *dev_replace =
&sbio->sctx->dev_root->fs_info->dev_replace;
&sbio->sctx->fs_info->dev_replace;
for (i = 0; i < sbio->page_count; i++) {
struct scrub_page *spage = sbio->pagev[i];
@ -1859,8 +1857,7 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock)
{
struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_header *h;
struct btrfs_root *root = sctx->dev_root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
u8 calculated_csum[BTRFS_CSUM_SIZE];
u8 on_disk_csum[BTRFS_CSUM_SIZE];
struct page *page;
@ -2126,7 +2123,7 @@ again:
static void scrub_missing_raid56_end_io(struct bio *bio)
{
struct scrub_block *sblock = bio->bi_private;
struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
if (bio->bi_error)
sblock->no_io_error_seen = 0;
@ -2140,6 +2137,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
{
struct scrub_block *sblock = container_of(work, struct scrub_block, work);
struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
u64 logical;
struct btrfs_device *dev;
@ -2153,14 +2151,14 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
spin_lock(&sctx->stat_lock);
sctx->stat.read_errors++;
spin_unlock(&sctx->stat_lock);
btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
btrfs_err_rl_in_rcu(fs_info,
"IO error rebuilding logical %llu for dev %s",
logical, rcu_str_deref(dev->name));
} else if (sblock->header_error || sblock->checksum_error) {
spin_lock(&sctx->stat_lock);
sctx->stat.uncorrectable_errors++;
spin_unlock(&sctx->stat_lock);
btrfs_err_rl_in_rcu(sctx->dev_root->fs_info,
btrfs_err_rl_in_rcu(fs_info,
"failed to rebuild valid logical %llu for dev %s",
logical, rcu_str_deref(dev->name));
} else {
@ -2182,7 +2180,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
static void scrub_missing_raid56_pages(struct scrub_block *sblock)
{
struct scrub_ctx *sctx = sblock->sctx;
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
u64 length = sblock->page_count * PAGE_SIZE;
u64 logical = sblock->pagev[0]->logical;
struct btrfs_bio *bbio = NULL;
@ -2191,8 +2189,8 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
int ret;
int i;
ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &length,
&bbio, 0, 1);
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
&length, &bbio, 0, 1);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;
@ -2215,7 +2213,7 @@ static void scrub_missing_raid56_pages(struct scrub_block *sblock)
bio->bi_private = sblock;
bio->bi_end_io = scrub_missing_raid56_end_io;
rbio = raid56_alloc_missing_rbio(sctx->dev_root, bio, bbio, length);
rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
if (!rbio)
goto rbio_out;
@ -2334,7 +2332,7 @@ leave_nomem:
static void scrub_bio_end_io(struct bio *bio)
{
struct scrub_bio *sbio = bio->bi_private;
struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
sbio->err = bio->bi_error;
sbio->bio = bio;
@ -2391,7 +2389,7 @@ static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
{
u32 offset;
int nsectors;
int sectorsize = sparity->sctx->dev_root->sectorsize;
int sectorsize = sparity->sctx->fs_info->sectorsize;
if (len >= sparity->stripe_len) {
bitmap_set(bitmap, 0, sparity->nsectors);
@ -2750,6 +2748,7 @@ static void scrub_parity_bio_endio_worker(struct btrfs_work *work)
static void scrub_parity_bio_endio(struct bio *bio)
{
struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
if (bio->bi_error)
bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
@ -2759,13 +2758,13 @@ static void scrub_parity_bio_endio(struct bio *bio)
btrfs_init_work(&sparity->work, btrfs_scrubparity_helper,
scrub_parity_bio_endio_worker, NULL, NULL);
btrfs_queue_work(sparity->sctx->dev_root->fs_info->scrub_parity_workers,
&sparity->work);
btrfs_queue_work(fs_info->scrub_parity_workers, &sparity->work);
}
static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
{
struct scrub_ctx *sctx = sparity->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct bio *bio;
struct btrfs_raid_bio *rbio;
struct scrub_page *spage;
@ -2778,8 +2777,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
goto out;
length = sparity->logic_end - sparity->logic_start;
ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
sparity->logic_start,
ret = btrfs_map_sblock(fs_info, BTRFS_MAP_WRITE, sparity->logic_start,
&length, &bbio, 0, 1);
if (ret || !bbio || !bbio->raid_map)
goto bbio_out;
@ -2792,7 +2790,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
bio->bi_private = sparity;
bio->bi_end_io = scrub_parity_bio_endio;
rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
rbio = raid56_parity_alloc_scrub_rbio(fs_info, bio, bbio,
length, sparity->scrub_dev,
sparity->dbitmap,
sparity->nsectors);
@ -2844,7 +2842,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
u64 logic_start,
u64 logic_end)
{
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_root *csum_root = fs_info->csum_root;
struct btrfs_extent_item *extent;
@ -2866,7 +2864,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
int extent_mirror_num;
int stop_loop = 0;
nsectors = div_u64(map->stripe_len, root->sectorsize);
nsectors = div_u64(map->stripe_len, fs_info->sectorsize);
bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
GFP_NOFS);
@ -2937,7 +2935,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
goto next;
if (key.type == BTRFS_METADATA_ITEM_KEY)
bytes = root->nodesize;
bytes = fs_info->nodesize;
else
bytes = key.offset;
@ -2988,8 +2986,9 @@ again:
mapped_length = extent_len;
bbio = NULL;
ret = btrfs_map_block(fs_info, READ, extent_logical,
&mapped_length, &bbio, 0);
ret = btrfs_map_block(fs_info, BTRFS_MAP_READ,
extent_logical, &mapped_length, &bbio,
0);
if (!ret) {
if (!bbio || mapped_length < extent_len)
ret = -EIO;
@ -3068,7 +3067,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
int is_dev_replace)
{
struct btrfs_path *path, *ppath;
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->extent_root;
struct btrfs_root *csum_root = fs_info->csum_root;
struct btrfs_extent_item *extent;
@ -3289,7 +3288,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
goto next;
if (key.type == BTRFS_METADATA_ITEM_KEY)
bytes = root->nodesize;
bytes = fs_info->nodesize;
else
bytes = key.offset;
@ -3442,8 +3441,8 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
struct btrfs_block_group_cache *cache,
int is_dev_replace)
{
struct btrfs_mapping_tree *map_tree =
&sctx->dev_root->fs_info->mapping_tree;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
struct map_lookup *map;
struct extent_map *em;
int i;
@ -3496,8 +3495,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
{
struct btrfs_dev_extent *dev_extent = NULL;
struct btrfs_path *path;
struct btrfs_root *root = sctx->dev_root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->dev_root;
u64 length;
u64 chunk_offset;
int ret = 0;
@ -3617,8 +3616,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
if (IS_ERR(trans))
ret = PTR_ERR(trans);
else
ret = btrfs_commit_transaction(trans,
root);
ret = btrfs_commit_transaction(trans);
if (ret) {
scrub_pause_off(fs_info);
btrfs_put_block_group(cache);
@ -3693,7 +3691,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
btrfs_dev_replace_unlock(&fs_info->dev_replace, 1);
if (ro_set)
btrfs_dec_block_group_ro(root, cache);
btrfs_dec_block_group_ro(cache);
/*
* We might have prevented the cleaner kthread from deleting
@ -3746,16 +3744,16 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
u64 bytenr;
u64 gen;
int ret;
struct btrfs_root *root = sctx->dev_root;
struct btrfs_fs_info *fs_info = sctx->fs_info;
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EIO;
/* Seed devices of a new filesystem has their own generation. */
if (scrub_dev->fs_devices != root->fs_info->fs_devices)
if (scrub_dev->fs_devices != fs_info->fs_devices)
gen = scrub_dev->generation;
else
gen = root->fs_info->last_trans_committed;
gen = fs_info->last_trans_committed;
for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
bytenr = btrfs_sb_offset(i);
@ -3847,7 +3845,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (btrfs_fs_closing(fs_info))
return -EINVAL;
if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
if (fs_info->nodesize > BTRFS_STRIPE_LEN) {
/*
* in this case scrub is unable to calculate the checksum
* the way scrub is implemented. Do not handle this
@ -3855,31 +3853,31 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
*/
btrfs_err(fs_info,
"scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
fs_info->nodesize,
BTRFS_STRIPE_LEN);
return -EINVAL;
}
if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
if (fs_info->sectorsize != PAGE_SIZE) {
/* not supported for data w/o checksums */
btrfs_err_rl(fs_info,
"scrub: size assumption sectorsize != PAGE_SIZE (%d != %lu) fails",
fs_info->chunk_root->sectorsize, PAGE_SIZE);
fs_info->sectorsize, PAGE_SIZE);
return -EINVAL;
}
if (fs_info->chunk_root->nodesize >
if (fs_info->nodesize >
PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
fs_info->chunk_root->sectorsize >
PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
fs_info->sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
/*
* would exhaust the array bounds of pagev member in
* struct scrub_block
*/
btrfs_err(fs_info,
"scrub: size assumption nodesize and sectorsize <= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
fs_info->chunk_root->nodesize,
fs_info->nodesize,
SCRUB_MAX_PAGES_PER_BLOCK,
fs_info->chunk_root->sectorsize,
fs_info->sectorsize,
SCRUB_MAX_PAGES_PER_BLOCK);
return -EINVAL;
}
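The btrfs_scrub_dev() hunks above drop the chunk_root indirection but keep the same three geometry invariants. A compileable sketch of those constraints, with the two constants treated as assumptions (BTRFS_STRIPE_LEN is 64K and SCRUB_MAX_PAGES_PER_BLOCK is 16 in this era of the code):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define BTRFS_STRIPE_LEN (64 * 1024u)	/* assumed value */
#define SCRUB_MAX_PAGES_PER_BLOCK 16u	/* assumed value */

/* Returns 0 if scrub's size assumptions hold for this geometry. */
static int scrub_geometry_ok(unsigned int nodesize, unsigned int sectorsize)
{
	if (nodesize > BTRFS_STRIPE_LEN)
		return -1;	/* csum calculation assumes a node fits a stripe */
	if (sectorsize != PAGE_SIZE)
		return -1;	/* data without checksums unsupported otherwise */
	if (nodesize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
	    sectorsize > PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK)
		return -1;	/* would overflow scrub_block's pagev[] array */
	return 0;
}

int main(void)
{
	printf("16k/4k: %d\n", scrub_geometry_ok(16384, 4096));	/* ok */
	printf("128k/4k: %d\n", scrub_geometry_ok(131072, 4096)); /* too big */
	return 0;
}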
@ -3979,10 +3977,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
return ret;
}
void btrfs_scrub_pause(struct btrfs_root *root)
void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
mutex_lock(&fs_info->scrub_lock);
atomic_inc(&fs_info->scrub_pause_req);
while (atomic_read(&fs_info->scrubs_paused) !=
@ -3996,10 +3992,8 @@ void btrfs_scrub_pause(struct btrfs_root *root)
mutex_unlock(&fs_info->scrub_lock);
}
void btrfs_scrub_continue(struct btrfs_root *root)
void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_info *fs_info = root->fs_info;
atomic_dec(&fs_info->scrub_pause_req);
wake_up(&fs_info->scrub_pause_wait);
}
@ -4048,19 +4042,19 @@ int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
return 0;
}
int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
struct btrfs_scrub_progress *progress)
{
struct btrfs_device *dev;
struct scrub_ctx *sctx = NULL;
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
mutex_lock(&fs_info->fs_devices->device_list_mutex);
dev = btrfs_find_device(fs_info, devid, NULL, NULL);
if (dev)
sctx = dev->scrub_device;
if (sctx)
memcpy(progress, &sctx->stat, sizeof(*progress));
mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
mutex_unlock(&fs_info->fs_devices->device_list_mutex);
return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
}
@ -4076,7 +4070,7 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
int ret;
mapped_length = extent_len;
ret = btrfs_map_block(fs_info, READ, extent_logical,
ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, extent_logical,
&mapped_length, &bbio, 0);
if (ret || !bbio || mapped_length < extent_len ||
!bbio->stripes[0].dev->bdev) {
@ -4122,7 +4116,7 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
int mirror_num, u64 physical_for_dev_replace)
{
struct scrub_copy_nocow_ctx *nocow_ctx;
struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = sctx->fs_info;
nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
if (!nocow_ctx) {
@ -4170,20 +4164,17 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
struct scrub_copy_nocow_ctx *nocow_ctx =
container_of(work, struct scrub_copy_nocow_ctx, work);
struct scrub_ctx *sctx = nocow_ctx->sctx;
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_root *root = fs_info->extent_root;
u64 logical = nocow_ctx->logical;
u64 len = nocow_ctx->len;
int mirror_num = nocow_ctx->mirror_num;
u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
int ret;
struct btrfs_trans_handle *trans = NULL;
struct btrfs_fs_info *fs_info;
struct btrfs_path *path;
struct btrfs_root *root;
int not_written = 0;
fs_info = sctx->dev_root->fs_info;
root = fs_info->extent_root;
path = btrfs_alloc_path();
if (!path) {
spin_lock(&sctx->stat_lock);
@ -4210,7 +4201,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
goto out;
}
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans);
trans = NULL;
while (!list_empty(&nocow_ctx->inodes)) {
struct scrub_nocow_inode *entry;
@ -4238,7 +4229,7 @@ out:
kfree(entry);
}
if (trans && !IS_ERR(trans))
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans);
if (not_written)
btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
num_uncorrectable_read_errors);
@ -4296,7 +4287,7 @@ out_unlock:
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
struct scrub_copy_nocow_ctx *nocow_ctx)
{
struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
struct btrfs_fs_info *fs_info = nocow_ctx->sctx->fs_info;
struct btrfs_key key;
struct inode *inode;
struct page *page;
@ -4426,7 +4417,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
if (!dev)
return -EIO;
if (!dev->bdev) {
btrfs_warn_rl(dev->dev_root->fs_info,
btrfs_warn_rl(dev->fs_info,
"scrub write_page_nocow(bdev == NULL) is unexpected");
return -EIO;
}
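scrub_supers() above now reads everything from sctx->fs_info, and the logic it preserves is worth spelling out: a seed device absorbed from another filesystem keeps its own superblock generation, while any other device is checked against the last committed transaction. Roughly, with stand-in types for the kernel structures:

#include <stdio.h>

struct fs_devices { int id; };

struct fs_info {
	struct fs_devices *fs_devices;
	unsigned long long last_trans_committed;
};

struct device {
	struct fs_devices *fs_devices;
	unsigned long long generation;
};

/* Pick the generation the superblock copies are expected to carry. */
static unsigned long long expected_gen(struct fs_info *fs_info,
				       struct device *dev)
{
	if (dev->fs_devices != fs_info->fs_devices)
		return dev->generation;	/* seed device: its own generation */
	return fs_info->last_trans_committed;
}

int main(void)
{
	struct fs_devices main_set = { 1 }, seed_set = { 2 };
	struct fs_info fs = { &main_set, 100 };
	struct device own = { &main_set, 90 }, seed = { &seed_set, 42 };

	printf("%llu %llu\n", expected_gen(&fs, &own), expected_gen(&fs, &seed));
	return 0;
}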

View file

@ -1054,7 +1054,8 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
ret = -ENAMETOOLONG;
goto out;
}
if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
if (name_len + data_len >
BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
ret = -E2BIG;
goto out;
}
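The iterate_dir_item() change wraps the same bound, with BTRFS_MAX_XATTR_SIZE() now taking the fs_info. The check itself is simple: an xattr's name and value must together fit in one leaf item. A sketch, assuming the limit (derived from the nodesize in ctree.h) is handed in as an input:

#include <stdio.h>

/*
 * The limit itself comes from BTRFS_MAX_XATTR_SIZE(fs_info) in the
 * kernel; treat it as a parameter here.
 */
static int xattr_fits(unsigned int max_xattr_size,
		      unsigned int name_len, unsigned int data_len)
{
	return name_len + data_len <= max_xattr_size;
}

int main(void)
{
	printf("%d\n", xattr_fits(16000, 16, 4000));	/* 1: fits */
	printf("%d\n", xattr_fits(3900, 16, 8000));	/* 0: would be -E2BIG */
	return 0;
}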
@ -1430,9 +1431,9 @@ static int find_extent_clone(struct send_ctx *sctx,
extent_item_pos = logical - found_key.objectid;
else
extent_item_pos = 0;
ret = iterate_extent_inodes(fs_info,
found_key.objectid, extent_item_pos, 1,
__iterate_backrefs, backref_ctx);
ret = iterate_extent_inodes(fs_info, found_key.objectid,
extent_item_pos, 1, __iterate_backrefs,
backref_ctx);
if (ret < 0)
goto out;
@ -3434,6 +3435,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
struct recorded_ref *parent_ref,
const bool is_orphan)
{
struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key di_key;
@ -3462,8 +3464,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
goto out;
}
di = btrfs_match_dir_item_name(sctx->parent_root, path,
parent_ref->name, parent_ref->name_len);
di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
parent_ref->name_len);
if (!di) {
ret = 0;
goto out;
@ -5264,7 +5266,7 @@ static int get_last_extent(struct send_ctx *sctx, u64 offset)
u64 size = btrfs_file_extent_inline_len(path->nodes[0],
path->slots[0], fi);
extent_end = ALIGN(key.offset + size,
sctx->send_root->sectorsize);
sctx->send_root->fs_info->sectorsize);
} else {
extent_end = key.offset +
btrfs_file_extent_num_bytes(path->nodes[0], fi);
@ -5299,7 +5301,7 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
u64 size = btrfs_file_extent_inline_len(path->nodes[0],
path->slots[0], fi);
extent_end = ALIGN(key->offset + size,
sctx->send_root->sectorsize);
sctx->send_root->fs_info->sectorsize);
} else {
extent_end = key->offset +
btrfs_file_extent_num_bytes(path->nodes[0], fi);
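Both get_last_extent() and maybe_send_hole() round an inline extent's end up to the sector size, now read from fs_info->sectorsize. ALIGN() is the usual power-of-two round-up; a worked example:

#include <stdio.h>

/* Kernel-style round-up to a power-of-two boundary. */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long key_offset = 0, size = 7000, sectorsize = 4096;

	/* 7000 rounds up to the next 4K boundary: 8192. */
	printf("extent_end = %lu\n", ALIGN(key_offset + size, sectorsize));
	return 0;
}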
@ -6110,7 +6112,7 @@ again:
goto commit_trans;
if (trans)
return btrfs_end_transaction(trans, sctx->send_root);
return btrfs_end_transaction(trans);
return 0;
@ -6123,7 +6125,7 @@ commit_trans:
goto again;
}
return btrfs_commit_transaction(trans, sctx->send_root);
return btrfs_commit_transaction(trans);
}
static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
@ -6136,17 +6138,17 @@ static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
*/
if (root->send_in_progress < 0)
btrfs_err(root->fs_info,
"send_in_progres unbalanced %d root %llu",
root->send_in_progress, root->root_key.objectid);
"send_in_progres unbalanced %d root %llu",
root->send_in_progress, root->root_key.objectid);
spin_unlock(&root->root_item_lock);
}
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{
int ret = 0;
struct btrfs_root *send_root;
struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
struct btrfs_fs_info *fs_info = send_root->fs_info;
struct btrfs_root *clone_root;
struct btrfs_fs_info *fs_info;
struct btrfs_ioctl_send_args *arg = NULL;
struct btrfs_key key;
struct send_ctx *sctx = NULL;
@ -6160,9 +6162,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
send_root = BTRFS_I(file_inode(mnt_file))->root;
fs_info = send_root->fs_info;
/*
* The subvolume must remain read-only during send, protect against
* making it RW. This also protects against deletion.

View file

@ -303,7 +303,7 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
static void btrfs_put_super(struct super_block *sb)
{
close_ctree(btrfs_sb(sb)->tree_root);
close_ctree(btrfs_sb(sb));
}
enum {
@ -394,10 +394,9 @@ static const match_table_t tokens = {
* reading in a new superblock is parsed here.
* XXX JDM: This needs to be cleaned up for remount.
*/
int btrfs_parse_options(struct btrfs_root *root, char *options,
int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
unsigned long new_flags)
{
struct btrfs_fs_info *info = root->fs_info;
substring_t args[MAX_OPT_ARGS];
char *p, *num, *orig = NULL;
u64 cache_gen;
@ -409,8 +408,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
bool saved_compress_force;
int no_compress = 0;
cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE))
cache_gen = btrfs_super_cache_generation(info->super_copy);
if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE))
btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
else if (cache_gen)
btrfs_set_opt(info->mount_opt, SPACE_CACHE);
@ -440,7 +439,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
token = match_token(p, tokens, args);
switch (token) {
case Opt_degraded:
btrfs_info(root->fs_info, "allowing degraded mounts");
btrfs_info(info, "allowing degraded mounts");
btrfs_set_opt(info->mount_opt, DEGRADED);
break;
case Opt_subvol:
@ -459,11 +458,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_datasum:
if (btrfs_test_opt(info, NODATASUM)) {
if (btrfs_test_opt(info, NODATACOW))
btrfs_info(root->fs_info,
btrfs_info(info,
"setting datasum, datacow enabled");
else
btrfs_info(root->fs_info,
"setting datasum");
btrfs_info(info, "setting datasum");
}
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
@ -472,11 +470,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
if (!btrfs_test_opt(info, NODATACOW)) {
if (!btrfs_test_opt(info, COMPRESS) ||
!btrfs_test_opt(info, FORCE_COMPRESS)) {
btrfs_info(root->fs_info,
btrfs_info(info,
"setting nodatacow, compression disabled");
} else {
btrfs_info(root->fs_info,
"setting nodatacow");
btrfs_info(info, "setting nodatacow");
}
}
btrfs_clear_opt(info->mount_opt, COMPRESS);
@ -543,8 +540,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
compress_force != saved_compress_force)) ||
(!btrfs_test_opt(info, COMPRESS) &&
no_compress == 1)) {
btrfs_info(root->fs_info,
"%s %s compression",
btrfs_info(info, "%s %s compression",
(compress_force) ? "force" : "use",
compress_type);
}
@ -592,10 +588,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
if (info->max_inline) {
info->max_inline = min_t(u64,
info->max_inline,
root->sectorsize);
info->sectorsize);
}
btrfs_info(root->fs_info, "max_inline at %llu",
info->max_inline);
btrfs_info(info, "max_inline at %llu",
info->max_inline);
} else {
ret = -ENOMEM;
goto out;
@ -608,8 +604,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
info->alloc_start = memparse(num, NULL);
mutex_unlock(&info->chunk_mutex);
kfree(num);
btrfs_info(root->fs_info,
"allocations start at %llu",
btrfs_info(info, "allocations start at %llu",
info->alloc_start);
} else {
ret = -ENOMEM;
@ -618,16 +613,15 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
case Opt_acl:
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
root->fs_info->sb->s_flags |= MS_POSIXACL;
info->sb->s_flags |= MS_POSIXACL;
break;
#else
btrfs_err(root->fs_info,
"support for ACL not compiled in!");
btrfs_err(info, "support for ACL not compiled in!");
ret = -EINVAL;
goto out;
#endif
case Opt_noacl:
root->fs_info->sb->s_flags &= ~MS_POSIXACL;
info->sb->s_flags &= ~MS_POSIXACL;
break;
case Opt_notreelog:
btrfs_set_and_info(info, NOTREELOG,
@ -656,8 +650,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
goto out;
} else if (intarg >= 0) {
info->metadata_ratio = intarg;
btrfs_info(root->fs_info, "metadata ratio %d",
info->metadata_ratio);
btrfs_info(info, "metadata ratio %d",
info->metadata_ratio);
} else {
ret = -EINVAL;
goto out;
@ -675,15 +669,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_space_cache_version:
if (token == Opt_space_cache ||
strcmp(args[0].from, "v1") == 0) {
btrfs_clear_opt(root->fs_info->mount_opt,
btrfs_clear_opt(info->mount_opt,
FREE_SPACE_TREE);
btrfs_set_and_info(info, SPACE_CACHE,
"enabling disk space caching");
"enabling disk space caching");
} else if (strcmp(args[0].from, "v2") == 0) {
btrfs_clear_opt(root->fs_info->mount_opt,
btrfs_clear_opt(info->mount_opt,
SPACE_CACHE);
btrfs_set_and_info(info,
FREE_SPACE_TREE,
btrfs_set_and_info(info, FREE_SPACE_TREE,
"enabling free space tree");
} else {
ret = -EINVAL;
@ -695,14 +688,12 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
case Opt_no_space_cache:
if (btrfs_test_opt(info, SPACE_CACHE)) {
btrfs_clear_and_info(info,
SPACE_CACHE,
"disabling disk space caching");
btrfs_clear_and_info(info, SPACE_CACHE,
"disabling disk space caching");
}
if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
btrfs_clear_and_info(info,
FREE_SPACE_TREE,
"disabling free space tree");
btrfs_clear_and_info(info, FREE_SPACE_TREE,
"disabling free space tree");
}
break;
case Opt_inode_cache:
@ -735,10 +726,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
"disabling auto defrag");
break;
case Opt_recovery:
btrfs_warn(root->fs_info,
btrfs_warn(info,
"'recovery' is deprecated, use 'usebackuproot' instead");
case Opt_usebackuproot:
btrfs_info(root->fs_info,
btrfs_info(info,
"trying to use backup root at mount time");
btrfs_set_opt(info->mount_opt, USEBACKUPROOT);
break;
@ -747,14 +738,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
case Opt_check_integrity_including_extent_data:
btrfs_info(root->fs_info,
btrfs_info(info,
"enabling check integrity including extent data");
btrfs_set_opt(info->mount_opt,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
case Opt_check_integrity:
btrfs_info(root->fs_info, "enabling check integrity");
btrfs_info(info, "enabling check integrity");
btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
break;
case Opt_check_integrity_print_mask:
@ -763,7 +754,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
goto out;
} else if (intarg >= 0) {
info->check_integrity_print_mask = intarg;
btrfs_info(root->fs_info,
btrfs_info(info,
"check_integrity_print_mask 0x%x",
info->check_integrity_print_mask);
} else {
@ -775,8 +766,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
case Opt_check_integrity_including_extent_data:
case Opt_check_integrity:
case Opt_check_integrity_print_mask:
btrfs_err(root->fs_info,
"support for check_integrity* not compiled in!");
btrfs_err(info,
"support for check_integrity* not compiled in!");
ret = -EINVAL;
goto out;
#endif
@ -796,20 +787,19 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
intarg = 0;
ret = match_int(&args[0], &intarg);
if (ret < 0) {
btrfs_err(root->fs_info,
"invalid commit interval");
btrfs_err(info, "invalid commit interval");
ret = -EINVAL;
goto out;
}
if (intarg > 0) {
if (intarg > 300) {
btrfs_warn(root->fs_info,
btrfs_warn(info,
"excessive commit interval %d",
intarg);
}
info->commit_interval = intarg;
} else {
btrfs_info(root->fs_info,
btrfs_info(info,
"using default commit interval %ds",
BTRFS_DEFAULT_COMMIT_INTERVAL);
info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
@ -817,23 +807,22 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
break;
#ifdef CONFIG_BTRFS_DEBUG
case Opt_fragment_all:
btrfs_info(root->fs_info, "fragmenting all space");
btrfs_info(info, "fragmenting all space");
btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
btrfs_set_opt(info->mount_opt, FRAGMENT_METADATA);
break;
case Opt_fragment_metadata:
btrfs_info(root->fs_info, "fragmenting metadata");
btrfs_info(info, "fragmenting metadata");
btrfs_set_opt(info->mount_opt,
FRAGMENT_METADATA);
break;
case Opt_fragment_data:
btrfs_info(root->fs_info, "fragmenting data");
btrfs_info(info, "fragmenting data");
btrfs_set_opt(info->mount_opt, FRAGMENT_DATA);
break;
#endif
case Opt_err:
btrfs_info(root->fs_info,
"unrecognized mount option '%s'", p);
btrfs_info(info, "unrecognized mount option '%s'", p);
ret = -EINVAL;
goto out;
default:
@ -845,22 +834,22 @@ check:
* Extra check for current option against current flag
*/
if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
btrfs_err(root->fs_info,
btrfs_err(info,
"nologreplay must be used with ro mount option");
ret = -EINVAL;
}
out:
if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE) &&
if (btrfs_fs_compat_ro(info, FREE_SPACE_TREE) &&
!btrfs_test_opt(info, FREE_SPACE_TREE) &&
!btrfs_test_opt(info, CLEAR_CACHE)) {
btrfs_err(root->fs_info, "cannot disable free space tree");
btrfs_err(info, "cannot disable free space tree");
ret = -EINVAL;
}
if (!ret && btrfs_test_opt(info, SPACE_CACHE))
btrfs_info(root->fs_info, "disk space caching is enabled");
btrfs_info(info, "disk space caching is enabled");
if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
btrfs_info(root->fs_info, "using free space tree");
btrfs_info(info, "using free space tree");
kfree(orig);
return ret;
}
@ -1171,7 +1160,7 @@ static int btrfs_fill_super(struct super_block *sb,
return 0;
fail_close:
close_ctree(fs_info->tree_root);
close_ctree(fs_info);
return err;
}
@ -1215,13 +1204,12 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
if (IS_ERR(trans))
return PTR_ERR(trans);
}
return btrfs_commit_transaction(trans, root);
return btrfs_commit_transaction(trans);
}
static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
struct btrfs_root *root = info->tree_root;
char *compress_type;
if (btrfs_test_opt(info, DEGRADED))
@ -1263,7 +1251,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
seq_puts(seq, ",flushoncommit");
if (btrfs_test_opt(info, DISCARD))
seq_puts(seq, ",discard");
if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
if (!(info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl");
if (btrfs_test_opt(info, SPACE_CACHE))
seq_puts(seq, ",space_cache");
@ -1742,7 +1730,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
}
}
ret = btrfs_parse_options(root, data, *flags);
ret = btrfs_parse_options(fs_info, data, *flags);
if (ret) {
ret = -EINVAL;
goto restore;
@ -1782,11 +1770,11 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
btrfs_scrub_cancel(fs_info);
btrfs_pause_balance(fs_info);
ret = btrfs_commit_super(root);
ret = btrfs_commit_super(fs_info);
if (ret)
goto restore;
} else {
if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
btrfs_err(fs_info,
"Remounting read-write after error is not allowed");
ret = -EINVAL;
@ -1899,9 +1887,10 @@ static inline void btrfs_descending_sort_devices(
* The helper to calc the free space on the devices that can be used to store
* file data.
*/
static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
u64 *free_bytes)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *root = fs_info->tree_root;
struct btrfs_device_info *devices_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
struct btrfs_device *device;
@ -2084,10 +2073,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
u64 thresh = 0;
int mixed = 0;
/*
* holding chunk_mutex to avoid allocating new chunks, holding
* device_list_mutex to avoid the device being removed
*/
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
@ -2139,7 +2124,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
spin_unlock(&block_rsv->lock);
buf->f_bavail = div_u64(total_free_data, factor);
ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
ret = btrfs_calc_avail_data_space(fs_info, &total_free_data);
if (ret)
return ret;
buf->f_bavail += div_u64(total_free_data, factor);
@ -2247,9 +2232,10 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
static int btrfs_freeze(struct super_block *sb)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = btrfs_sb(sb)->tree_root;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root = fs_info->tree_root;
root->fs_info->fs_frozen = 1;
fs_info->fs_frozen = 1;
/*
* We don't need a barrier here, we'll wait for any transaction that
* could be in progress on other threads (and do delayed iputs that
@ -2263,14 +2249,12 @@ static int btrfs_freeze(struct super_block *sb)
return 0;
return PTR_ERR(trans);
}
return btrfs_commit_transaction(trans, root);
return btrfs_commit_transaction(trans);
}
static int btrfs_unfreeze(struct super_block *sb)
{
struct btrfs_root *root = btrfs_sb(sb)->tree_root;
root->fs_info->fs_frozen = 0;
btrfs_sb(sb)->fs_frozen = 0;
return 0;
}
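btrfs_freeze()/btrfs_unfreeze() above now go through btrfs_sb(sb) once and commit via the root-less btrfs_commit_transaction(). The shape of the freeze path, sketched with stand-in types (an attach-style start signals ENOENT when no transaction is running, which freeze treats as success):

#include <stdio.h>
#include <errno.h>

struct fs_info { int fs_frozen; };
struct trans { struct fs_info *fs_info; };

/* Stand-in: pretend no transaction is currently running. */
static struct trans *attach_transaction(struct fs_info *fs)
{
	(void)fs;
	errno = ENOENT;
	return NULL;
}

static int commit_transaction(struct trans *t) { (void)t; return 0; }

static int freeze(struct fs_info *fs)
{
	struct trans *t;

	fs->fs_frozen = 1;
	t = attach_transaction(fs);
	if (!t)
		return errno == ENOENT ? 0 : -errno; /* nothing to commit */
	return commit_transaction(t);
}

int main(void)
{
	struct fs_info fs = { 0 };

	printf("freeze: %d, frozen: %d\n", freeze(&fs), fs.fs_frozen);
	return 0;
}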

View file

@ -79,7 +79,7 @@ static void btrfs_destroy_test_fs(void)
unregister_filesystem(&test_type);
}
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize)
{
struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
GFP_KERNEL);
@ -100,6 +100,9 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
return NULL;
}
fs_info->nodesize = nodesize;
fs_info->sectorsize = sectorsize;
if (init_srcu_struct(&fs_info->subvol_srcu)) {
kfree(fs_info->fs_devices);
kfree(fs_info->super_copy);
@ -190,7 +193,8 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
}
struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info,
unsigned long length)
{
struct btrfs_block_group_cache *cache;
@ -207,8 +211,9 @@ btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize)
cache->key.objectid = 0;
cache->key.offset = length;
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
cache->sectorsize = sectorsize;
cache->full_stripe_len = sectorsize;
cache->sectorsize = fs_info->sectorsize;
cache->full_stripe_len = fs_info->sectorsize;
cache->fs_info = fs_info;
INIT_LIST_HEAD(&cache->list);
INIT_LIST_HEAD(&cache->cluster_list);
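After this change the test fixtures carry the block geometry on the dummy fs_info itself, so helpers like btrfs_alloc_dummy_block_group() no longer need a sectorsize passed alongside. A compressed userspace sketch of the new shape, under hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct fs_info {
	unsigned int nodesize;
	unsigned int sectorsize;
};

struct block_group {
	struct fs_info *fs_info;
	unsigned long length;
	unsigned int sectorsize;	/* copied from fs_info */
};

static struct fs_info *alloc_dummy_fs_info(unsigned int nodesize,
					   unsigned int sectorsize)
{
	struct fs_info *fs = calloc(1, sizeof(*fs));

	if (fs) {
		fs->nodesize = nodesize;
		fs->sectorsize = sectorsize;
	}
	return fs;
}

static struct block_group *alloc_dummy_block_group(struct fs_info *fs,
						   unsigned long length)
{
	struct block_group *bg = calloc(1, sizeof(*bg));

	if (bg) {
		bg->fs_info = fs;
		bg->length = length;
		bg->sectorsize = fs->sectorsize;
	}
	return bg;
}

int main(void)
{
	struct fs_info *fs = alloc_dummy_fs_info(16384, 4096);
	struct block_group *bg = alloc_dummy_block_group(fs, 8 * 4096UL);

	printf("bg len %lu sector %u\n", bg->length, bg->sectorsize);
	free(bg);
	free(fs);
	return 0;
}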

View file

@ -34,11 +34,11 @@ int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(u32 nodesize, u32 sectorsize);
void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
void btrfs_free_dummy_root(struct btrfs_root *root);
struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
btrfs_alloc_dummy_block_group(struct btrfs_fs_info *fs_info, unsigned long length);
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
#else

View file

@ -41,13 +41,13 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
test_msg("Running btrfs_split_item tests\n");
fs_info = btrfs_alloc_dummy_fs_info();
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Could not allocate fs_info\n");
return -ENOMEM;
}
root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Could not allocate root\n");
ret = PTR_ERR(root);
@ -61,8 +61,7 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
goto out;
}
path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
nodesize);
path->nodes[0] = eb = alloc_dummy_extent_buffer(fs_info, nodesize);
if (!eb) {
test_msg("Could not allocate dummy buffer\n");
ret = -ENOMEM;

View file

@ -306,7 +306,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
int ret;
memset(bitmap, 0, len);
memset_extent_buffer(eb, 0, 0, len);
memzero_extent_buffer(eb, 0, len);
if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
test_msg("Bitmap was not zeroed\n");
return -EINVAL;
@ -383,6 +383,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
struct btrfs_fs_info *fs_info;
unsigned long len;
unsigned long *bitmap;
struct extent_buffer *eb;
@ -397,13 +398,15 @@ static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
? sectorsize * 4 : sectorsize;
fs_info = btrfs_alloc_dummy_fs_info(len, len);
bitmap = kmalloc(len, GFP_KERNEL);
if (!bitmap) {
test_msg("Couldn't allocate test bitmap\n");
return -ENOMEM;
}
eb = __alloc_dummy_extent_buffer(NULL, 0, len);
eb = __alloc_dummy_extent_buffer(fs_info, 0, len);
if (!eb) {
test_msg("Couldn't allocate test extent buffer\n");
kfree(bitmap);

View file

@ -843,33 +843,31 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
int ret = -ENOMEM;
test_msg("Running btrfs free space cache tests\n");
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info)
return -ENOMEM;
/*
* For ppc64 (with 64k page size), bytes per bitmap might be
* larger than 1G. To make bitmap test available in ppc64,
* alloc dummy block group whose size cross bitmaps.
*/
cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize
+ PAGE_SIZE, sectorsize);
cache = btrfs_alloc_dummy_block_group(fs_info,
BITS_PER_BITMAP * sectorsize + PAGE_SIZE);
if (!cache) {
test_msg("Couldn't run the tests\n");
btrfs_free_dummy_fs_info(fs_info);
return 0;
}
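The ppc64 comment above is a sizing concern: if BITS_PER_BITMAP is derived from the page size (historically PAGE_SIZE * 8 in free-space-cache.c; treat that as an assumption here), one bitmap covers BITS_PER_BITMAP * sectorsize bytes, which on 64K-page machines exceeds 1G, so the dummy block group must span more than one bitmap to exercise the bitmap paths at all. The arithmetic:

#include <stdio.h>

int main(void)
{
	/* Assumed: BITS_PER_BITMAP = PAGE_SIZE * 8, one bit per sector. */
	unsigned long long cases[][2] = {
		{ 4096, 4096 },		/* x86: 4K pages, 4K sectors */
		{ 65536, 4096 },	/* ppc64: 64K pages, 4K sectors */
	};

	for (int i = 0; i < 2; i++) {
		unsigned long long page = cases[i][0], sector = cases[i][1];
		unsigned long long bits = page * 8;

		/* 128 MiB on x86, 2048 MiB on ppc64 -- "larger than 1G". */
		printf("bytes per bitmap: %llu MiB\n",
		       bits * sector / (1024 * 1024));
	}
	return 0;
}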
fs_info = btrfs_alloc_dummy_fs_info();
if (!fs_info) {
ret = -ENOMEM;
goto out;
}
root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto out;
}
root->fs_info->extent_root = root;
cache->fs_info = root->fs_info;
ret = test_extents(cache);
if (ret)

View file

@ -455,14 +455,14 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
struct btrfs_path *path = NULL;
int ret;
fs_info = btrfs_alloc_dummy_fs_info();
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
ret = -ENOMEM;
goto out;
}
root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate dummy root\n");
ret = PTR_ERR(root);
@ -474,8 +474,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
root->fs_info->free_space_root = root;
root->fs_info->tree_root = root;
root->node = alloc_test_extent_buffer(root->fs_info,
nodesize, nodesize);
root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
@ -485,7 +484,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
btrfs_set_header_nritems(root->node, 0);
root->alloc_bytenr += 2 * nodesize;
cache = btrfs_alloc_dummy_block_group(8 * alignment, sectorsize);
cache = btrfs_alloc_dummy_block_group(fs_info, 8 * alignment);
if (!cache) {
test_msg("Couldn't allocate dummy block group cache\n");
ret = -ENOMEM;

View file

@ -249,19 +249,19 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
fs_info = btrfs_alloc_dummy_fs_info();
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
}
root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
@ -854,19 +854,19 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
fs_info = btrfs_alloc_dummy_fs_info();
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;
}
root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize);
root->node = alloc_dummy_extent_buffer(fs_info, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
goto out;
@ -950,13 +950,13 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
return ret;
}
fs_info = btrfs_alloc_dummy_fs_info();
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
goto out;

View file

@ -458,13 +458,13 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
struct btrfs_root *tmp_root;
int ret = 0;
fs_info = btrfs_alloc_dummy_fs_info();
fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
if (!fs_info) {
test_msg("Couldn't allocate dummy fs info\n");
return -ENOMEM;
}
root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(root)) {
test_msg("Couldn't allocate root\n");
ret = PTR_ERR(root);
@ -486,8 +486,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
* Can't use bytenr 0, some things freak out
* *cough*backref walking code*cough*
*/
root->node = alloc_test_extent_buffer(root->fs_info, nodesize,
nodesize);
root->node = alloc_test_extent_buffer(root->fs_info, nodesize);
if (!root->node) {
test_msg("Couldn't allocate dummy buffer\n");
ret = -ENOMEM;
@ -497,7 +496,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
btrfs_set_header_nritems(root->node, 0);
root->alloc_bytenr += 2 * nodesize;
tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
tmp_root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
@ -512,7 +511,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
goto out;
}
tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
tmp_root = btrfs_alloc_dummy_root(fs_info);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);

Diff not rendered because it is too large

View file

@ -123,11 +123,6 @@ struct btrfs_trans_handle {
bool sync;
bool dirty;
unsigned int type;
/*
* this root is only needed to validate that the root passed to
* start_transaction is the same as the one passed to end_transaction.
* Subvolume quota depends on this
*/
struct btrfs_root *root;
struct btrfs_fs_info *fs_info;
struct seq_list delayed_ref_elem;
@ -185,8 +180,7 @@ static inline void btrfs_clear_skip_qgroup(struct btrfs_trans_handle *trans)
delayed_refs->qgroup_to_skip = 0;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_end_transaction(struct btrfs_trans_handle *trans);
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
unsigned int num_items);
struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
@ -202,27 +196,24 @@ struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid);
void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans);
int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
int wait_for_unblock);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
void btrfs_throttle(struct btrfs_root *root);
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans);
int btrfs_should_end_transaction(struct btrfs_trans_handle *trans);
void btrfs_throttle(struct btrfs_fs_info *fs_info);
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_write_marked_extents(struct btrfs_root *root,
struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_marked_extents(struct btrfs_root *root,
int btrfs_write_marked_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages, int mark);
int btrfs_wait_extents(struct btrfs_fs_info *fs_info,
struct extent_io_tree *dirty_pages);
int btrfs_wait_tree_log_extents(struct btrfs_root *root, int mark);
int btrfs_transaction_blocked(struct btrfs_fs_info *info);
int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
void btrfs_put_transaction(struct btrfs_transaction *transaction);
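The prototype changes above are the heart of the series: a btrfs_trans_handle already records its fs_info (and the root member that existed only for start/end validation is gone), so commit/end no longer take a root. A minimal sketch of the simplified call shape, with toy types:

#include <stdio.h>
#include <stdlib.h>

struct fs_info { unsigned long long generation; };

struct trans_handle {
	struct fs_info *fs_info;	/* recorded at start time */
};

static struct trans_handle *start_transaction(struct fs_info *fs)
{
	struct trans_handle *t = malloc(sizeof(*t));

	if (t)
		t->fs_info = fs;
	return t;
}

/* After the cleanup: no root argument, context comes from the handle. */
static int commit_transaction(struct trans_handle *t)
{
	t->fs_info->generation++;
	free(t);
	return 0;
}

int main(void)
{
	struct fs_info fs = { 41 };
	struct trans_handle *t = start_transaction(&fs);

	if (!t)
		return 1;
	commit_transaction(t);
	printf("generation %llu\n", fs.generation);
	return 0;
}

This is why hunks all through the diff collapse btrfs_end_transaction(trans, root) to btrfs_end_transaction(trans) with no behavior change.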

View file

@ -142,12 +142,13 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
mutex_lock(&root->log_mutex);
if (root->log_root) {
if (btrfs_need_log_full_commit(root->fs_info, trans)) {
if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN;
goto out;
}
@ -159,10 +160,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
}
} else {
mutex_lock(&root->fs_info->tree_log_mutex);
if (!root->fs_info->log_root_tree)
ret = btrfs_init_log_root_tree(trans, root->fs_info);
mutex_unlock(&root->fs_info->tree_log_mutex);
mutex_lock(&fs_info->tree_log_mutex);
if (!fs_info->log_root_tree)
ret = btrfs_init_log_root_tree(trans, fs_info);
mutex_unlock(&fs_info->tree_log_mutex);
if (ret)
goto out;
@ -292,25 +293,26 @@ static int process_one_buffer(struct btrfs_root *log,
struct extent_buffer *eb,
struct walk_control *wc, u64 gen)
{
struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
/*
* If this fs is mixed then we need to be able to process the leaves to
* pin down any logged extents, so we have to read the block.
*/
if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
ret = btrfs_read_buffer(eb, gen);
if (ret)
return ret;
}
if (wc->pin)
ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
eb->start, eb->len);
ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
eb->len);
if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
if (wc->pin && btrfs_header_level(eb) == 0)
ret = btrfs_exclude_logged_extents(log, eb);
ret = btrfs_exclude_logged_extents(fs_info, eb);
if (wc->write)
btrfs_write_tree_block(eb);
if (wc->wait)
@ -339,6 +341,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
u32 item_size;
u64 saved_i_size = 0;
@ -459,9 +462,9 @@ insert:
found_size = btrfs_item_size_nr(path->nodes[0],
path->slots[0]);
if (found_size > item_size)
btrfs_truncate_item(root, path, item_size, 1);
btrfs_truncate_item(fs_info, path, item_size, 1);
else if (found_size < item_size)
btrfs_extend_item(root, path,
btrfs_extend_item(fs_info, path,
item_size - found_size);
} else if (ret) {
return ret;
@ -582,6 +585,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int found_type;
u64 extent_end;
u64 start = key->offset;
@ -608,7 +612,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size = btrfs_file_extent_inline_len(eb, slot, item);
nbytes = btrfs_file_extent_ram_bytes(eb, item);
extent_end = ALIGN(start + size, root->sectorsize);
extent_end = ALIGN(start + size,
fs_info->sectorsize);
} else {
ret = 0;
goto out;
@ -689,7 +694,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* as the owner of the file extent changed from log tree
* (doesn't affect qgroup) to fs/file tree(affects qgroup)
*/
ret = btrfs_qgroup_insert_dirty_extent(trans, root->fs_info,
ret = btrfs_qgroup_trace_extent(trans, fs_info,
btrfs_file_extent_disk_bytenr(eb, item),
btrfs_file_extent_disk_num_bytes(eb, item),
GFP_NOFS);
@ -704,10 +709,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* is this extent already allocated in the extent
* allocation tree? If so, just add a reference
*/
ret = btrfs_lookup_data_extent(root, ins.objectid,
ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
ins.offset);
if (ret == 0) {
ret = btrfs_inc_extent_ref(trans, root,
ret = btrfs_inc_extent_ref(trans, fs_info,
ins.objectid, ins.offset,
0, root->root_key.objectid,
key->objectid, offset);
@ -719,7 +724,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
* allocation tree
*/
ret = btrfs_alloc_logged_file_extent(trans,
root, root->root_key.objectid,
fs_info,
root->root_key.objectid,
key->objectid, offset, &ins);
if (ret)
goto out;
@ -796,14 +802,12 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum,
list);
if (!ret)
ret = btrfs_del_csums(trans,
root->fs_info->csum_root,
sums->bytenr,
sums->len);
ret = btrfs_del_csums(trans, fs_info,
sums->bytenr,
sums->len);
if (!ret)
ret = btrfs_csum_file_blocks(trans,
root->fs_info->csum_root,
sums);
fs_info->csum_root, sums);
list_del(&sums->list);
kfree(sums);
}
@ -841,6 +845,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
struct inode *dir,
struct btrfs_dir_item *di)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct inode *inode;
char *name;
int name_len;
@ -873,7 +878,7 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
if (ret)
goto out;
else
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
out:
kfree(name);
iput(inode);
@ -991,6 +996,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
u64 ref_index, char *name, int namelen,
int *search_done)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
char *victim_name;
int victim_name_len;
@ -1049,7 +1055,7 @@ again:
kfree(victim_name);
if (ret)
return ret;
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
if (ret)
return ret;
*search_done = 1;
@ -1120,7 +1126,8 @@ again:
victim_name_len);
if (!ret)
ret = btrfs_run_delayed_items(
trans, root);
trans,
fs_info);
}
iput(victim_parent);
kfree(victim_name);
@ -1811,6 +1818,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
struct extent_buffer *eb, int slot,
struct btrfs_key *key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;
u32 item_size = btrfs_item_size_nr(eb, slot);
struct btrfs_dir_item *di;
@ -1823,7 +1831,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
if (verify_dir_item(root, eb, di))
if (verify_dir_item(fs_info, eb, di))
return -EIO;
name_len = btrfs_dir_name_len(eb, di);
ret = replay_one_name(trans, root, path, eb, di, key);
@ -1940,12 +1948,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
next:
/* check the next slot in the tree to see if it is a valid item */
nritems = btrfs_header_nritems(path->nodes[0]);
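/*
 * Advance the slot before the bounds check, so btrfs_next_leaf() is
 * consulted exactly when the incremented slot falls off this leaf.
 */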
path->slots[0]++;
if (path->slots[0] >= nritems) {
ret = btrfs_next_leaf(root, path);
if (ret)
goto out;
} else {
path->slots[0]++;
}
btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
@ -1978,6 +1985,7 @@ static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
struct inode *dir,
struct btrfs_key *dir_key)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct extent_buffer *eb;
int slot;
@ -1999,7 +2007,7 @@ again:
ptr_end = ptr + item_size;
while (ptr < ptr_end) {
di = (struct btrfs_dir_item *)ptr;
if (verify_dir_item(root, eb, di)) {
if (verify_dir_item(fs_info, eb, di)) {
ret = -EIO;
goto out;
}
@ -2046,7 +2054,7 @@ again:
ret = btrfs_unlink_inode(trans, root, dir, inode,
name, name_len);
if (!ret)
ret = btrfs_run_delayed_items(trans, root);
ret = btrfs_run_delayed_items(trans, fs_info);
kfree(name);
iput(inode);
if (ret)
@ -2407,6 +2415,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int *level,
struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 root_owner;
u64 bytenr;
u64 ptr_gen;
@ -2432,12 +2441,12 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
blocksize = root->nodesize;
blocksize = fs_info->nodesize;
parent = path->nodes[*level];
root_owner = btrfs_header_owner(parent);
next = btrfs_find_create_tree_block(root, bytenr);
next = btrfs_find_create_tree_block(fs_info, bytenr);
if (IS_ERR(next))
return PTR_ERR(next);
@ -2459,16 +2468,16 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
clean_tree_block(trans, root->fs_info,
next);
clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(root_owner !=
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
bytenr, blocksize);
ret = btrfs_free_and_pin_reserved_extent(
fs_info, bytenr,
blocksize);
if (ret) {
free_extent_buffer(next);
return ret;
@ -2505,6 +2514,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_path *path, int *level,
struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = root->fs_info;
u64 root_owner;
int i;
int slot;
@ -2538,14 +2548,14 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
clean_tree_block(trans, root->fs_info,
next);
clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(root,
ret = btrfs_free_and_pin_reserved_extent(
fs_info,
path->nodes[*level]->start,
path->nodes[*level]->len);
if (ret)
@ -2567,6 +2577,7 @@ static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
static int walk_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *log, struct walk_control *wc)
{
struct btrfs_fs_info *fs_info = log->fs_info;
int ret = 0;
int wret;
int level;
@ -2615,15 +2626,15 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
if (trans) {
btrfs_tree_lock(next);
btrfs_set_lock_blocking(next);
clean_tree_block(trans, log->fs_info, next);
clean_tree_block(trans, fs_info, next);
btrfs_wait_tree_block_writeback(next);
btrfs_tree_unlock(next);
}
WARN_ON(log->root_key.objectid !=
BTRFS_TREE_LOG_OBJECTID);
ret = btrfs_free_and_pin_reserved_extent(log, next->start,
next->len);
ret = btrfs_free_and_pin_reserved_extent(fs_info,
next->start, next->len);
if (ret)
goto out;
}
@ -2641,14 +2652,15 @@ out:
static int update_log_root(struct btrfs_trans_handle *trans,
struct btrfs_root *log)
{
struct btrfs_fs_info *fs_info = log->fs_info;
int ret;
if (log->log_transid == 1) {
/* insert root item on the first sync */
ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
ret = btrfs_insert_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item);
} else {
ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
ret = btrfs_update_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item);
}
return ret;
@ -2742,8 +2754,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
int index2;
int mark;
int ret;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
struct btrfs_root *log_root_tree = fs_info->log_root_tree;
int log_transid = 0;
struct btrfs_log_ctx root_log_ctx;
struct blk_plug plug;
@ -2771,7 +2784,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
while (1) {
int batch = atomic_read(&root->log_batch);
/* when we're on an ssd, just kick the log commit out */
if (!btrfs_test_opt(root->fs_info, SSD) &&
if (!btrfs_test_opt(fs_info, SSD) &&
test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
mutex_unlock(&root->log_mutex);
schedule_timeout_uninterruptible(1);
@ -2783,7 +2796,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
/* bail out if we need to do a full commit */
if (btrfs_need_log_full_commit(root->fs_info, trans)) {
if (btrfs_need_log_full_commit(fs_info, trans)) {
ret = -EAGAIN;
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
@ -2799,12 +2812,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* wait for them until later.
*/
blk_start_plug(&plug);
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
if (ret) {
blk_finish_plug(&plug);
btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
btrfs_set_log_full_commit(root->fs_info, trans);
btrfs_set_log_full_commit(fs_info, trans);
mutex_unlock(&root->log_mutex);
goto out;
}
@ -2849,14 +2862,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
list_del_init(&root_log_ctx.list);
blk_finish_plug(&plug);
btrfs_set_log_full_commit(root->fs_info, trans);
btrfs_set_log_full_commit(fs_info, trans);
if (ret != -ENOSPC) {
btrfs_abort_transaction(trans, ret);
mutex_unlock(&log_root_tree->log_mutex);
goto out;
}
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
btrfs_wait_tree_log_extents(log, mark);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
@ -2874,8 +2887,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
index2 = root_log_ctx.log_transid % 2;
if (atomic_read(&log_root_tree->log_commit[index2])) {
blk_finish_plug(&plug);
ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
mark);
ret = btrfs_wait_tree_log_extents(log, mark);
btrfs_wait_logged_extents(trans, log, log_transid);
wait_log_commit(log_root_tree,
root_log_ctx.log_transid);
@ -2898,43 +2910,42 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* now that we've moved on to the tree of log tree roots,
* check the full commit flag again
*/
if (btrfs_need_log_full_commit(root->fs_info, trans)) {
if (btrfs_need_log_full_commit(fs_info, trans)) {
blk_finish_plug(&plug);
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
btrfs_wait_tree_log_extents(log, mark);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
}
ret = btrfs_write_marked_extents(log_root_tree,
ret = btrfs_write_marked_extents(fs_info,
&log_root_tree->dirty_log_pages,
EXTENT_DIRTY | EXTENT_NEW);
blk_finish_plug(&plug);
if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans);
btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
ret = btrfs_wait_tree_log_extents(log, mark);
if (!ret)
ret = btrfs_wait_marked_extents(log_root_tree,
&log_root_tree->dirty_log_pages,
EXTENT_NEW | EXTENT_DIRTY);
ret = btrfs_wait_tree_log_extents(log_root_tree,
EXTENT_NEW | EXTENT_DIRTY);
if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans);
btrfs_set_log_full_commit(fs_info, trans);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
btrfs_wait_logged_extents(trans, log, log_transid);
btrfs_set_super_log_root(root->fs_info->super_for_commit,
log_root_tree->node->start);
btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
btrfs_header_level(log_root_tree->node));
btrfs_set_super_log_root(fs_info->super_for_commit,
log_root_tree->node->start);
btrfs_set_super_log_root_level(fs_info->super_for_commit,
btrfs_header_level(log_root_tree->node));
log_root_tree->log_transid++;
mutex_unlock(&log_root_tree->log_mutex);
@ -2946,9 +2957,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
* the running transaction open, so a full commit can't hop
* in and cause problems either.
*/
ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
ret = write_ctree_super(trans, fs_info, 1);
if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans);
btrfs_set_log_full_commit(fs_info, trans);
btrfs_abort_transaction(trans, ret);
goto out_wake_log_root;
}
@ -3182,6 +3193,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
const char *name, int name_len,
struct inode *inode, u64 dirid)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log;
u64 index;
int ret;
@ -3199,7 +3211,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
dirid, &index);
mutex_unlock(&BTRFS_I(inode)->log_mutex);
if (ret == -ENOSPC) {
btrfs_set_log_full_commit(root->fs_info, trans);
btrfs_set_log_full_commit(fs_info, trans);
ret = 0;
} else if (ret < 0 && ret != -ENOENT)
btrfs_abort_transaction(trans, ret);
@ -3606,6 +3618,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
int start_slot, int nr, int inode_only,
u64 logged_isize)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
unsigned long src_offset;
unsigned long dst_offset;
struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
@ -3716,7 +3729,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
}
ret = btrfs_lookup_csums_range(
log->fs_info->csum_root,
fs_info->csum_root,
ds + cs, ds + cs + cl - 1,
&ordered_sums, 0);
if (ret) {
@ -3789,7 +3802,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
src_path->slots[0],
extent);
*last_extent = ALIGN(key.offset + len,
log->sectorsize);
fs_info->sectorsize);
} else {
len = btrfs_file_extent_num_bytes(src, extent);
*last_extent = key.offset + len;
@ -3852,7 +3865,8 @@ fill_holes:
if (btrfs_file_extent_type(src, extent) ==
BTRFS_FILE_EXTENT_INLINE) {
len = btrfs_file_extent_inline_len(src, i, extent);
extent_end = ALIGN(key.offset + len, log->sectorsize);
extent_end = ALIGN(key.offset + len,
fs_info->sectorsize);
} else {
len = btrfs_file_extent_num_bytes(src, extent);
extent_end = key.offset + len;
@ -3902,6 +3916,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
const struct list_head *logged_list,
bool *ordered_io_error)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_ordered_extent *ordered;
struct btrfs_root *log = root->log_root;
u64 mod_start = em->mod_start;
@ -4018,7 +4033,7 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
}
/* block start is already adjusted for the file extent offset. */
ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
ret = btrfs_lookup_csums_range(fs_info->csum_root,
em->block_start + csum_offset,
em->block_start + csum_offset +
csum_len - 1, &ordered_sums, 0);
@ -4361,6 +4376,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
struct inode *inode,
struct btrfs_path *path)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
struct btrfs_key key;
u64 hole_start;
@ -4370,7 +4386,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
const u64 ino = btrfs_ino(inode);
const u64 i_size = i_size_read(inode);
if (!btrfs_fs_incompat(root->fs_info, NO_HOLES))
if (!btrfs_fs_incompat(fs_info, NO_HOLES))
return 0;
key.objectid = ino;
@ -4427,7 +4443,7 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
if (hole_size == 0)
return 0;
hole_size = ALIGN(hole_size, root->sectorsize);
hole_size = ALIGN(hole_size, fs_info->sectorsize);
ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
hole_size, 0, hole_size, 0, 0, 0);
return ret;
@ -4585,6 +4601,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
const loff_t end,
struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
struct btrfs_path *dst_path;
struct btrfs_key min_key;
@ -4637,7 +4654,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
* fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
*/
if (S_ISDIR(inode->i_mode) ||
BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
BTRFS_I(inode)->generation > fs_info->last_trans_committed)
ret = btrfs_commit_inode_delayed_items(trans, inode);
else
ret = btrfs_commit_inode_delayed_inode(inode);
@ -4774,7 +4791,7 @@ again:
inode_key.objectid = other_ino;
inode_key.type = BTRFS_INODE_ITEM_KEY;
inode_key.offset = 0;
other_inode = btrfs_iget(root->fs_info->sb,
other_inode = btrfs_iget(fs_info->sb,
&inode_key, root,
NULL);
/*
@ -5138,6 +5155,7 @@ static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
struct inode *start_inode,
struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root;
struct btrfs_path *path;
LIST_HEAD(dir_list);
@ -5205,8 +5223,8 @@ process_leaf:
if (di_key.type == BTRFS_ROOT_ITEM_KEY)
continue;
di_inode = btrfs_iget(root->fs_info->sb, &di_key,
root, NULL);
btrfs_release_path(path);
di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
if (IS_ERR(di_inode)) {
ret = PTR_ERR(di_inode);
goto next_dir_inode;
@ -5214,13 +5232,12 @@ process_leaf:
if (btrfs_inode_in_log(di_inode, trans->transid)) {
iput(di_inode);
continue;
break;
}
ctx->log_new_dentries = false;
if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
log_mode = LOG_INODE_ALL;
btrfs_release_path(path);
ret = btrfs_log_inode(trans, root, di_inode,
log_mode, 0, LLONG_MAX, ctx);
if (!ret &&
@ -5268,6 +5285,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
struct inode *inode,
struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret;
struct btrfs_path *path;
struct btrfs_key key;
@ -5332,7 +5350,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
cur_offset = item_size;
}
dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
dir_inode = btrfs_iget(fs_info->sb, &inode_key,
root, NULL);
/* If parent inode was deleted, skip it. */
if (IS_ERR(dir_inode))
@ -5374,17 +5392,18 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
int exists_only,
struct btrfs_log_ctx *ctx)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
struct super_block *sb;
struct dentry *old_parent = NULL;
int ret = 0;
u64 last_committed = root->fs_info->last_trans_committed;
u64 last_committed = fs_info->last_trans_committed;
bool log_dentries = false;
struct inode *orig_inode = inode;
sb = inode->i_sb;
if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
if (btrfs_test_opt(fs_info, NOTREELOG)) {
ret = 1;
goto end_no_trans;
}
@ -5393,8 +5412,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
* The prev transaction commit doesn't complete, we need do
* full commit by ourselves.
*/
if (root->fs_info->last_trans_log_full_commit >
root->fs_info->last_trans_committed) {
if (fs_info->last_trans_log_full_commit >
fs_info->last_trans_committed) {
ret = 1;
goto end_no_trans;
}
@ -5515,7 +5534,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans:
dput(old_parent);
if (ret < 0) {
btrfs_set_log_full_commit(root->fs_info, trans);
btrfs_set_log_full_commit(fs_info, trans);
ret = 1;
}
@ -5675,7 +5694,7 @@ again:
btrfs_free_path(path);
/* step 4: commit the transaction, which also unpins the blocks */
ret = btrfs_commit_transaction(trans, fs_info->tree_root);
ret = btrfs_commit_transaction(trans);
if (ret)
return ret;
@ -5687,7 +5706,7 @@ again:
return 0;
error:
if (wc.trans)
btrfs_end_transaction(wc.trans, fs_info->tree_root);
btrfs_end_transaction(wc.trans);
btrfs_free_path(path);
return ret;
}
@ -5786,6 +5805,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
struct inode *inode, struct inode *old_dir,
struct dentry *parent)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root * root = BTRFS_I(inode)->root;
/*
@ -5800,9 +5820,9 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans,
* from hasn't been logged, we don't need to log it
*/
if (BTRFS_I(inode)->logged_trans <=
root->fs_info->last_trans_committed &&
fs_info->last_trans_committed &&
(!old_dir || BTRFS_I(old_dir)->logged_trans <=
root->fs_info->last_trans_committed))
fs_info->last_trans_committed))
return 0;
return btrfs_log_inode_parent(trans, root, inode, parent, 0,


@ -92,9 +92,10 @@ out:
}
int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
struct btrfs_root *uuid_root, u8 *uuid, u8 type,
struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
u64 subid_cpu)
{
struct btrfs_root *uuid_root = fs_info->uuid_root;
int ret;
struct btrfs_path *path = NULL;
struct btrfs_key key;
@ -132,13 +133,13 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans,
* An item with that type already exists.
* Extend the item and store the new subid at the end.
*/
btrfs_extend_item(uuid_root, path, sizeof(subid_le));
btrfs_extend_item(fs_info, path, sizeof(subid_le));
eb = path->nodes[0];
slot = path->slots[0];
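/*
 * btrfs_extend_item() has already grown the item by sizeof(subid_le),
 * so stepping back that much from the new item end addresses the
 * freshly added tail where the subid is stored.
 */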
offset = btrfs_item_ptr_offset(eb, slot);
offset += btrfs_item_size_nr(eb, slot) - sizeof(subid_le);
} else if (ret < 0) {
btrfs_warn(uuid_root->fs_info,
btrfs_warn(fs_info,
"insert uuid item failed %d (0x%016llx, 0x%016llx) type %u!",
ret, (unsigned long long)key.objectid,
(unsigned long long)key.offset, type);
@ -156,9 +157,10 @@ out:
}
int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
struct btrfs_root *uuid_root, u8 *uuid, u8 type,
struct btrfs_fs_info *fs_info, u8 *uuid, u8 type,
u64 subid)
{
struct btrfs_root *uuid_root = fs_info->uuid_root;
int ret;
struct btrfs_path *path = NULL;
struct btrfs_key key;
@ -185,8 +187,8 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, uuid_root, &key, path, -1, 1);
if (ret < 0) {
btrfs_warn(uuid_root->fs_info,
"error %d while searching for uuid item!", ret);
btrfs_warn(fs_info, "error %d while searching for uuid item!",
ret);
goto out;
}
if (ret > 0) {
@ -199,8 +201,7 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
offset = btrfs_item_ptr_offset(eb, slot);
item_size = btrfs_item_size_nr(eb, slot);
if (!IS_ALIGNED(item_size, sizeof(u64))) {
btrfs_warn(uuid_root->fs_info,
"uuid item with illegal size %lu!",
btrfs_warn(fs_info, "uuid item with illegal size %lu!",
(unsigned long)item_size);
ret = -ENOENT;
goto out;
@ -230,7 +231,7 @@ int btrfs_uuid_tree_rem(struct btrfs_trans_handle *trans,
move_src = offset + sizeof(subid);
move_len = item_size - (move_src - btrfs_item_ptr_offset(eb, slot));
memmove_extent_buffer(eb, move_dst, move_src, move_len);
btrfs_truncate_item(uuid_root, path, item_size - sizeof(subid), 1);
btrfs_truncate_item(fs_info, path, item_size - sizeof(subid), 1);
out:
btrfs_free_path(path);
@ -250,8 +251,8 @@ static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
goto out;
}
ret = btrfs_uuid_tree_rem(trans, uuid_root, uuid, type, subid);
btrfs_end_transaction(trans, uuid_root);
ret = btrfs_uuid_tree_rem(trans, uuid_root->fs_info, uuid, type, subid);
btrfs_end_transaction(trans);
out:
return ret;

The diff for one file is not shown because of its large size.


@ -51,8 +51,7 @@ struct btrfs_device {
struct list_head dev_list;
struct list_head dev_alloc_list;
struct btrfs_fs_devices *fs_devices;
struct btrfs_root *dev_root;
struct btrfs_fs_info *fs_info;
struct rcu_string *name;
@ -371,27 +370,48 @@ struct btrfs_balance_control {
struct btrfs_balance_progress stat;
};
enum btrfs_map_op {
BTRFS_MAP_READ,
BTRFS_MAP_WRITE,
BTRFS_MAP_DISCARD,
BTRFS_MAP_GET_READ_MIRRORS,
};
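/*
 * btrfs_map_op decouples the chunk-mapping code from the block
 * layer's REQ_OP_* values; btrfs_op() below translates a bio's op
 * into this internal enum.
 */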
static inline enum btrfs_map_op btrfs_op(struct bio *bio)
{
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
return BTRFS_MAP_DISCARD;
case REQ_OP_WRITE:
return BTRFS_MAP_WRITE;
default:
WARN_ON_ONCE(1);
case REQ_OP_READ:
return BTRFS_MAP_READ;
}
}
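/*
 * Note that the WARN_ON_ONCE() in the default arm falls through to
 * the REQ_OP_READ case, so an unexpected bio op is mapped to a read
 * after warning once rather than left unhandled.
 */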
int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
u64 end, u64 *length);
void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num);
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num,
int need_raid_map);
int btrfs_rmap_block(struct btrfs_fs_info *fs_info,
u64 chunk_start, u64 physical, u64 devid,
u64 **logical, int *naddrs, int *stripe_len);
int btrfs_read_sys_array(struct btrfs_root *root);
int btrfs_read_chunk_tree(struct btrfs_root *root);
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info);
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 type);
struct btrfs_fs_info *fs_info, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
int btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
@ -401,16 +421,17 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices);
void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step);
void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info,
struct btrfs_device *device, struct btrfs_device *this_dev);
int btrfs_find_device_missing_or_by_path(struct btrfs_root *root,
int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device **device);
int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid,
int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid,
char *devpath,
struct btrfs_device **device);
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
const u64 *devid,
const u8 *uuid);
int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid);
int btrfs_rm_device(struct btrfs_fs_info *fs_info,
char *device_path, u64 devid);
void btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
@ -418,8 +439,9 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid,
u8 *uuid, u8 *fsid);
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_root *root, char *path);
int btrfs_init_dev_replace_tgtdev(struct btrfs_root *root, char *device_path,
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path);
int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
char *device_path,
struct btrfs_device *srcdev,
struct btrfs_device **device_out);
int btrfs_balance(struct btrfs_balance_control *bctl,
@ -430,7 +452,7 @@ int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset);
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
struct btrfs_device *device, u64 num_bytes,
u64 search_start, u64 *start, u64 *max_avail);
@ -438,7 +460,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *max_avail);
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
int btrfs_get_dev_stats(struct btrfs_root *root,
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
struct btrfs_ioctl_get_dev_stats *stats);
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
@ -455,14 +477,14 @@ void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path);
int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
u64 logical, u64 len, int mirror_num);
unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
struct btrfs_mapping_tree *map_tree,
u64 logical);
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct btrfs_fs_info *fs_info,
u64 chunk_offset, u64 chunk_size);
int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 chunk_offset);
struct btrfs_fs_info *fs_info, u64 chunk_offset);
static inline int btrfs_dev_stats_dirty(struct btrfs_device *dev)
{
@ -509,19 +531,9 @@ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
}
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
void btrfs_update_commit_device_bytes_used(struct btrfs_fs_info *fs_info,
struct btrfs_transaction *transaction);
static inline void lock_chunks(struct btrfs_root *root)
{
mutex_lock(&root->fs_info->chunk_mutex);
}
static inline void unlock_chunks(struct btrfs_root *root)
{
mutex_unlock(&root->fs_info->chunk_mutex);
}
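/*
 * These wrappers only hid a plain mutex_lock()/mutex_unlock() pair;
 * with them removed, callers presumably take fs_info->chunk_mutex
 * directly.
 */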
struct list_head *btrfs_get_fs_uuids(void);
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info);
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info);


@ -94,11 +94,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
{
struct btrfs_dir_item *di = NULL;
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_path *path;
size_t name_len = strlen(name);
int ret = 0;
if (name_len + size > BTRFS_MAX_XATTR_SIZE(root))
if (name_len + size > BTRFS_MAX_XATTR_SIZE(root->fs_info))
return -ENOSPC;
path = btrfs_alloc_path();
@ -149,14 +150,14 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
*/
ret = 0;
btrfs_assert_tree_locked(path->nodes[0]);
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
if (!di && !(flags & XATTR_REPLACE)) {
ret = -ENOSPC;
goto out;
}
} else if (ret == -EEXIST) {
ret = 0;
di = btrfs_match_dir_item_name(root, path, name, name_len);
di = btrfs_match_dir_item_name(fs_info, path, name, name_len);
ASSERT(di); /* logic error */
} else if (ret) {
goto out;
@ -185,7 +186,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
char *ptr;
if (size > old_data_len) {
if (btrfs_leaf_free_space(root, leaf) <
if (btrfs_leaf_free_space(fs_info, leaf) <
(size - old_data_len)) {
ret = -ENOSPC;
goto out;
@ -195,16 +196,17 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
if (old_data_len + name_len + sizeof(*di) == item_size) {
/* No other xattrs packed in the same leaf item. */
if (size > old_data_len)
btrfs_extend_item(root, path,
btrfs_extend_item(fs_info, path,
size - old_data_len);
else if (size < old_data_len)
btrfs_truncate_item(root, path, data_size, 1);
btrfs_truncate_item(fs_info, path,
data_size, 1);
} else {
/* There are other xattrs packed in the same item. */
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret)
goto out;
btrfs_extend_item(root, path, data_size);
btrfs_extend_item(fs_info, path, data_size);
}
item = btrfs_item_nr(slot);
@ -257,7 +259,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
out:
btrfs_end_transaction(trans, root);
btrfs_end_transaction(trans);
return ret;
}
@ -265,6 +267,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct btrfs_key key;
struct inode *inode = d_inode(dentry);
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_path *path;
int ret = 0;
@ -333,7 +336,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
u32 this_len = sizeof(*di) + name_len + data_len;
unsigned long name_ptr = (unsigned long)(di + 1);
if (verify_dir_item(root, leaf, di)) {
if (verify_dir_item(fs_info, leaf, di)) {
ret = -EIO;
goto err;
}


@ -210,10 +210,9 @@ out:
return ret;
}
static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
static int zlib_decompress_bio(struct list_head *ws, struct page **pages_in,
u64 disk_start,
struct bio_vec *bvec,
int vcnt,
struct bio *orig_bio,
size_t srclen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
@ -222,10 +221,8 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
char *data_in;
size_t total_out = 0;
unsigned long page_in_index = 0;
unsigned long page_out_index = 0;
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long pg_offset;
data_in = kmap(pages_in[page_in_index]);
workspace->strm.next_in = data_in;
@ -235,7 +232,6 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
workspace->strm.total_out = 0;
workspace->strm.next_out = workspace->buf;
workspace->strm.avail_out = PAGE_SIZE;
pg_offset = 0;
/* If it's deflate, and it's got no preset dictionary, then
we can tell zlib to skip the adler32 check. */
@ -250,6 +246,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
if (Z_OK != zlib_inflateInit2(&workspace->strm, wbits)) {
pr_warn("BTRFS: inflateInit failed\n");
kunmap(pages_in[page_in_index]);
return -EIO;
}
while (workspace->strm.total_in < srclen) {
@ -266,8 +263,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in,
ret2 = btrfs_decompress_buf2page(workspace->buf, buf_start,
total_out, disk_start,
bvec, vcnt,
&page_out_index, &pg_offset);
orig_bio);
if (ret2 == 0) {
ret = 0;
goto done;
@ -300,7 +296,7 @@ done:
if (data_in)
kunmap(pages_in[page_in_index]);
if (!ret)
btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
zero_fill_bio(orig_bio);
return ret;
}
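/*
 * The copy helper advances the original bio as data lands in it, so
 * zero_fill_bio() only zeroes the tail that decompression did not
 * fill, replacing the manual page/offset bookkeeping of the old
 * biovec interface.
 */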
@ -407,6 +403,6 @@ const struct btrfs_compress_op btrfs_zlib_compress = {
.alloc_workspace = zlib_alloc_workspace,
.free_workspace = zlib_free_workspace,
.compress_pages = zlib_compress_pages,
.decompress_biovec = zlib_decompress_biovec,
.decompress_bio = zlib_decompress_bio,
.decompress = zlib_decompress,
};


@ -698,10 +698,10 @@ DEFINE_EVENT(btrfs_delayed_ref_head, run_delayed_ref_head,
DECLARE_EVENT_CLASS(btrfs__chunk,
TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
TP_ARGS(root, map, offset, size),
TP_ARGS(fs_info, map, offset, size),
TP_STRUCT__entry_btrfs(
__field( int, num_stripes )
@ -712,13 +712,13 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__field( u64, root_objectid )
),
TP_fast_assign_btrfs(root->fs_info,
TP_fast_assign_btrfs(fs_info,
__entry->num_stripes = map->num_stripes;
__entry->type = map->type;
__entry->sub_stripes = map->sub_stripes;
__entry->offset = offset;
__entry->size = size;
__entry->root_objectid = root->root_key.objectid;
__entry->root_objectid = fs_info->chunk_root->root_key.objectid;
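/*
 * With only an fs_info available here, chunk events now report the
 * chunk root's objectid rather than the objectid of a caller root.
 */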
),
TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
@ -732,18 +732,18 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_alloc,
TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
TP_ARGS(root, map, offset, size)
TP_ARGS(fs_info, map, offset, size)
);
DEFINE_EVENT(btrfs__chunk, btrfs_chunk_free,
TP_PROTO(struct btrfs_root *root, struct map_lookup *map,
TP_PROTO(struct btrfs_fs_info *fs_info, struct map_lookup *map,
u64 offset, u64 size),
TP_ARGS(root, map, offset, size)
TP_ARGS(fs_info, map, offset, size)
);
TRACE_EVENT(btrfs_cow_block,
@ -891,65 +891,61 @@ TRACE_EVENT(btrfs_flush_space,
DECLARE_EVENT_CLASS(btrfs__reserved_extent,
TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
TP_ARGS(root, start, len),
TP_ARGS(fs_info, start, len),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, start )
__field( u64, len )
),
TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
TP_fast_assign_btrfs(fs_info,
__entry->start = start;
__entry->len = len;
),
TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
show_root_type(__entry->root_objectid),
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
(unsigned long long)__entry->start,
(unsigned long long)__entry->len)
);
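/*
 * The per-root objectid field is gone from these reserved-extent
 * events; the printed root is fixed to BTRFS_EXTENT_TREE_OBJECTID.
 */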
DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_alloc,
TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
TP_ARGS(root, start, len)
TP_ARGS(fs_info, start, len)
);
DEFINE_EVENT(btrfs__reserved_extent, btrfs_reserved_extent_free,
TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
TP_PROTO(struct btrfs_fs_info *fs_info, u64 start, u64 len),
TP_ARGS(root, start, len)
TP_ARGS(fs_info, start, len)
);
TRACE_EVENT(find_free_extent,
TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
TP_PROTO(struct btrfs_fs_info *fs_info, u64 num_bytes, u64 empty_size,
u64 data),
TP_ARGS(root, num_bytes, empty_size, data),
TP_ARGS(fs_info, num_bytes, empty_size, data),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, num_bytes )
__field( u64, empty_size )
__field( u64, data )
),
TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
TP_fast_assign_btrfs(fs_info,
__entry->num_bytes = num_bytes;
__entry->empty_size = empty_size;
__entry->data = data;
),
TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
"flags = %Lu(%s)", show_root_type(__entry->root_objectid),
TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
BTRFS_GROUP_FLAGS))
@ -957,22 +953,20 @@ TRACE_EVENT(find_free_extent,
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
TP_PROTO(struct btrfs_root *root,
TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
TP_ARGS(root, block_group, start, len),
TP_ARGS(fs_info, block_group, start, len),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
__field( u64, bg_objectid )
__field( u64, flags )
__field( u64, start )
__field( u64, len )
),
TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
TP_fast_assign_btrfs(fs_info,
__entry->bg_objectid = block_group->key.objectid;
__entry->flags = block_group->flags;
__entry->start = start;
@ -981,7 +975,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
"start = %Lu, len = %Lu",
show_root_type(__entry->root_objectid), __entry->bg_objectid,
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
"|", BTRFS_GROUP_FLAGS),
__entry->start, __entry->len)
@ -989,20 +984,20 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
TP_PROTO(struct btrfs_root *root,
TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
TP_ARGS(root, block_group, start, len)
TP_ARGS(fs_info, block_group, start, len)
);
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
TP_PROTO(struct btrfs_root *root,
TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_block_group_cache *block_group, u64 start,
u64 len),
TP_ARGS(root, block_group, start, len)
TP_ARGS(fs_info, block_group, start, len)
);
TRACE_EVENT(btrfs_find_cluster,
@ -1406,7 +1401,7 @@ DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
TP_ARGS(fs_info, rec)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent,
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent,
TP_PROTO(struct btrfs_fs_info *fs_info,
struct btrfs_qgroup_extent_record *rec),