btrfs: drop extent_io_ops::merge_bio_hook callback
The data and metadata callback implementations both use the same function. We can remove the call indirection completely.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent: 05912a3c04
Commit: 00032d38ea
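For context, here is a minimal, self-contained userspace sketch of the pattern this commit removes: when every ops table points the callback at the same function, the indirect call through the function pointer can be replaced by a direct call. The struct and type names below are illustrative stand-ins, not the kernel code.

/*
 * Simplified sketch (not kernel code): two ops tables whose merge hook
 * always resolves to the same function, so the indirection adds nothing.
 */
#include <stdio.h>

struct page;	/* opaque placeholders standing in for the kernel types */
struct bio;

/* the one shared implementation, analogous to btrfs_merge_bio_hook() */
static int merge_bio_hook(struct page *page, unsigned long offset,
			  size_t size, struct bio *bio,
			  unsigned long bio_flags)
{
	(void)page; (void)offset; (void)size; (void)bio; (void)bio_flags;
	return 0;	/* 0: page can be merged into the bio */
}

/* before: both ops tables carry a pointer that always has the same value */
struct extent_io_ops_before {
	int (*merge_bio_hook)(struct page *, unsigned long, size_t,
			      struct bio *, unsigned long);
};

static const struct extent_io_ops_before data_ops  = { .merge_bio_hook = merge_bio_hook };
static const struct extent_io_ops_before btree_ops = { .merge_bio_hook = merge_bio_hook };

int main(void)
{
	/* before: every call site goes through the ops table indirection */
	int r1 = data_ops.merge_bio_hook(NULL, 0, 4096, NULL, 0);
	int r2 = btree_ops.merge_bio_hook(NULL, 0, 4096, NULL, 0);

	/* after: the member is dropped and callers invoke the function directly */
	int r3 = merge_bio_hook(NULL, 0, 4096, NULL, 0);

	printf("%d %d %d\n", r1, r2, r3);
	return 0;
}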
@@ -299,7 +299,6 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 	struct bio *bio = NULL;
 	struct compressed_bio *cb;
 	unsigned long bytes_left;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	int pg_index = 0;
 	struct page *page;
 	u64 first_byte = disk_start;
@@ -338,9 +337,7 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		page = compressed_pages[pg_index];
 		page->mapping = inode->i_mapping;
 		if (bio->bi_iter.bi_size)
-			submit = io_tree->ops->merge_bio_hook(page, 0,
-							PAGE_SIZE,
-							bio, 0);
+			submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE, bio, 0);

 		page->mapping = NULL;
 		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
@@ -622,8 +619,7 @@ blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		page->index = em_start >> PAGE_SHIFT;

 		if (comp_bio->bi_iter.bi_size)
-			submit = tree->ops->merge_bio_hook(page, 0,
-							PAGE_SIZE,
-							comp_bio, 0);
+			submit = btrfs_merge_bio_hook(page, 0, PAGE_SIZE,
+					comp_bio, 0);

 		page->mapping = NULL;
@@ -4527,8 +4527,6 @@ static const struct extent_io_ops btree_extent_io_ops = {
 	/* mandatory callbacks */
 	.submit_bio_hook = btree_submit_bio_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
-	/* note we're sharing with inode.c for the merge bio hook */
-	.merge_bio_hook = btrfs_merge_bio_hook,
 	.readpage_io_failed_hook = btree_io_failed_hook,
 	.set_range_writeback = btrfs_set_range_writeback,
@@ -2784,8 +2784,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
 		else
 			contig = bio_end_sector(bio) == sector;

-		if (tree->ops && tree->ops->merge_bio_hook(page, offset,
-					page_size, bio, bio_flags))
+		if (tree->ops && btrfs_merge_bio_hook(page, offset, page_size,
+						      bio, bio_flags))
 			can_merge = false;

 		if (prev_bio_flags != bio_flags || !contig || !can_merge ||
@@ -101,9 +101,6 @@ struct extent_io_ops {
 	int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset,
 				    struct page *page, u64 start, u64 end,
 				    int mirror);
-	int (*merge_bio_hook)(struct page *page, unsigned long offset,
-			      size_t size, struct bio *bio,
-			      unsigned long bio_flags);
 	int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
 	void (*set_range_writeback)(void *private_data, u64 start, u64 end);
@@ -1898,8 +1898,8 @@ static void btrfs_clear_bit_hook(void *private_data,
 }

 /*
- * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
- * we don't create bios that span stripes or chunks
+ * Merge bio hook, this must check the chunk tree to make sure we don't create
+ * bios that span stripes or chunks
  *
  * return 1 if page cannot be merged to bio
  * return 0 if page can be merged to bio
@@ -10545,7 +10545,6 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
 	/* mandatory callbacks */
 	.submit_bio_hook = btrfs_submit_bio_hook,
 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
-	.merge_bio_hook = btrfs_merge_bio_hook,
 	.readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
 	.set_range_writeback = btrfs_set_range_writeback,
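The comment updated in the inode.c hunk above documents the hook's contract: return 1 when the page cannot be merged into the current bio (the resulting I/O would span a stripe or chunk), 0 when it can. A self-contained sketch of how a caller acts on that contract follows; the boundary size, helper name, and loop are simplified assumptions for illustration, not the btrfs implementation.

/*
 * Minimal sketch of the documented contract: when adding another page would
 * cross a stripe/chunk-style boundary, the current bio is submitted and a
 * new one is started; otherwise the page is merged into the current bio.
 */
#include <stdio.h>

#define STRIPE_LEN	(64 * 1024)	/* assumed boundary size for the sketch */
#define PAGE_SIZE	4096

/* return 1 if extending the bio by "size" bytes would cross a boundary */
static int would_cross_boundary(unsigned long long bio_start, size_t size)
{
	unsigned long long stripe_end = (bio_start / STRIPE_LEN + 1) * STRIPE_LEN;

	return bio_start + size > stripe_end;
}

int main(void)
{
	unsigned long long bio_start = 60 * 1024;	/* current bio begins here */
	size_t bio_size = 0;

	for (int i = 0; i < 4; i++) {
		unsigned long long pos = bio_start + bio_size;

		if (bio_size && would_cross_boundary(bio_start, bio_size + PAGE_SIZE)) {
			/* "return 1": cannot merge, submit and start a new bio */
			printf("submit bio [%llu, %llu)\n", bio_start, pos);
			bio_start = pos;
			bio_size = 0;
		}
		bio_size += PAGE_SIZE;	/* "return 0": merge the page */
	}
	printf("submit bio [%llu, %llu)\n", bio_start, bio_start + bio_size);
	return 0;
}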