btrfs: Make btrfs_lookup_ordered_range take btrfs_inode
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Parent: 7a6d706795
Commit: a776c6fa1f
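The conversion is mechanical: btrfs_lookup_ordered_range() now takes a struct btrfs_inode * instead of a struct inode *, so its body can use inode->ordered_tree directly, while callers that only hold a VFS inode wrap it with BTRFS_I() at the call site. Below is a minimal, self-contained sketch of that pattern; the struct bodies, the stub lookup function, and the main() harness are stand-ins for illustration, not the kernel sources.

#include <stddef.h>
#include <stdio.h>

typedef unsigned long long u64;

/* Stand-ins for the kernel structures; only the embedding relationship matters. */
struct inode {
	u64 i_size;
};

struct btrfs_inode {
	/* ordered_tree, io_tree, ... elided in this sketch */
	struct inode vfs_inode;		/* embedded VFS inode */
};

/* Same idea as the kernel's BTRFS_I(): map a VFS inode back to its container. */
static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
{
	return (struct btrfs_inode *)((char *)inode -
				      offsetof(struct btrfs_inode, vfs_inode));
}

/*
 * Stub standing in for the converted function: it now receives the
 * btrfs_inode directly instead of converting from a VFS inode internally.
 */
static void btrfs_lookup_ordered_range_stub(struct btrfs_inode *inode,
					    u64 file_offset, u64 len)
{
	printf("lookup on btrfs_inode %p: range [%llu, %llu)\n",
	       (void *)inode, file_offset, file_offset + len);
}

int main(void)
{
	struct btrfs_inode bi = { .vfs_inode = { .i_size = 4096 } };
	struct inode *vfs_inode = &bi.vfs_inode;	/* what most callers hold */

	/* Callers wrap the VFS inode at the call site, as every hunk below does. */
	btrfs_lookup_ordered_range_stub(BTRFS_I(vfs_inode), 0, 4096);
	return 0;
}

The hunks below apply exactly this: the definition in ordered-data.c and the declaration in ordered-data.h change their first parameter, and every caller passes BTRFS_I(inode).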
@@ -3101,7 +3101,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
 	inode = pages[0]->mapping->host;
 	while (1) {
 		lock_extent(tree, start, end);
-		ordered = btrfs_lookup_ordered_range(inode, start,
+		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
 						end - start + 1);
 		if (!ordered)
 			break;
@@ -3173,7 +3173,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
 	while (1) {
 		lock_extent(tree, start, end);
-		ordered = btrfs_lookup_ordered_range(inode, start,
+		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
 						PAGE_SIZE);
 		if (!ordered)
 			break;
@@ -1436,7 +1436,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 		struct btrfs_ordered_extent *ordered;
 		lock_extent_bits(&BTRFS_I(inode)->io_tree,
 				 start_pos, last_pos, cached_state);
-		ordered = btrfs_lookup_ordered_range(inode, start_pos,
+		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start_pos,
 						     last_pos - start_pos + 1);
 		if (ordered &&
 		    ordered->file_offset + ordered->len > start_pos &&
@@ -1494,7 +1494,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
 
 	while (1) {
 		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
-		ordered = btrfs_lookup_ordered_range(inode, lockstart,
+		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
 						     lockend - lockstart + 1);
 		if (!ordered) {
 			break;
@@ -1966,7 +1966,7 @@ again:
 	if (PagePrivate2(page))
 		goto out;
 
-	ordered = btrfs_lookup_ordered_range(inode, page_start,
+	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
 					PAGE_SIZE);
 	if (ordered) {
 		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
@@ -4838,7 +4838,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 
 		lock_extent_bits(io_tree, hole_start, block_end - 1,
 				 &cached_state);
-		ordered = btrfs_lookup_ordered_range(inode, hole_start,
+		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
 						     block_end - hole_start);
 		if (!ordered)
 			break;
@@ -7428,7 +7428,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
 		 * doing DIO to, so we need to make sure there's no ordered
 		 * extents in this range.
 		 */
-		ordered = btrfs_lookup_ordered_range(inode, lockstart,
+		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
 						     lockend - lockstart + 1);
 
 		/*
@@ -8801,7 +8801,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 	lock_extent_bits(tree, page_start, page_end, &cached_state);
again:
 	start = page_start;
-	ordered = btrfs_lookup_ordered_range(inode, start,
+	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
 					page_end - start + 1);
 	if (ordered) {
 		end = min(page_end, ordered->file_offset + ordered->len - 1);
@@ -8967,7 +8967,8 @@ again:
 	 * we can't set the delalloc bits if there are pending ordered
 	 * extents. Drop our locks and wait for them to finish
 	 */
-	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
+	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
+			PAGE_SIZE);
 	if (ordered) {
 		unlock_extent_cached(io_tree, page_start, page_end,
 				     &cached_state, GFP_NOFS);
@@ -879,15 +879,14 @@ out:
 /* Since the DIO code tries to lock a wide area we need to look for any ordered
  * extents that exist in the range, rather than just the start of the range.
  */
-struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
-							u64 file_offset,
-							u64 len)
+struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
+		struct btrfs_inode *inode, u64 file_offset, u64 len)
 {
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry = NULL;
 
-	tree = &BTRFS_I(inode)->ordered_tree;
+	tree = &inode->ordered_tree;
 	spin_lock_irq(&tree->lock);
 	node = tree_search(tree, file_offset);
 	if (!node) {
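The comment at the top of this hunk is the reason the function takes a range at all: DIO locks a wide area, so an ordered extent that merely overlaps the queried range has to be found even if it does not start at file_offset. A small illustrative sketch of that overlap test follows; the helper name and the main() harness are hypothetical, not taken from the kernel sources.

#include <stdio.h>

typedef unsigned long long u64;

/*
 * Illustrative overlap test (hypothetical helper): an ordered extent spanning
 * [e_start, e_start + e_len) must be reported if it intersects the queried
 * range [q_start, q_start + q_len) anywhere, not only if it starts at q_start.
 */
static int range_overlaps(u64 e_start, u64 e_len, u64 q_start, u64 q_len)
{
	if (q_start + q_len <= e_start ||	/* query ends before extent starts */
	    e_start + e_len <= q_start)		/* extent ends before query starts */
		return 0;
	return 1;
}

int main(void)
{
	/* An extent at [4096, 8192) overlaps a DIO range [0, 16384) even
	 * though it does not start at offset 0. */
	printf("%d\n", range_overlaps(4096, 4096, 0, 16384));	/* prints 1 */
	printf("%d\n", range_overlaps(16384, 4096, 0, 16384));	/* prints 0 */
	return 0;
}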
@@ -923,7 +922,7 @@ bool btrfs_have_ordered_extents_in_range(struct inode *inode,
 {
 	struct btrfs_ordered_extent *oe;
 
-	oe = btrfs_lookup_ordered_range(inode, file_offset, len);
+	oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len);
 	if (oe) {
 		btrfs_put_ordered_extent(oe);
 		return true;
@@ -189,9 +189,10 @@ void btrfs_start_ordered_extent(struct inode *inode,
 int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len);
 struct btrfs_ordered_extent *
 btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset);
-struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
-							u64 file_offset,
-							u64 len);
+struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
+		struct btrfs_inode *inode,
+		u64 file_offset,
+		u64 len);
 bool btrfs_have_ordered_extents_in_range(struct inode *inode,
 					 u64 file_offset,
 					 u64 len);
@@ -4249,7 +4249,7 @@ static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
 	io_tree = &BTRFS_I(inode)->io_tree;
 
 	lock_extent_bits(io_tree, lockstart, lockend, &cached_state);
-	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
+	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, len);
 	if (ordered) {
 		btrfs_put_ordered_extent(ordered);
 		ret = 1;