btrfs: add tracepoints for ordered extents
When debugging a reference counting issue with ordered extents, I've found we're lacking a lot of tracepoint coverage in the ordered extent code. Close these gaps by adding tracepoints after every refcount_inc() in the ordered extent code. Reviewed-by: Boris Burkov <boris@bur.io> Reviewed-by: Qu Wenruo <wqu@suse.com> Reviewed-by: Anand Jain <anand.jain@oracle.com> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> Reviewed-by: David Sterba <dsterba@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Parent
15dcccdb8b
Commit
5bea250881
|
@ -401,6 +401,7 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
|
|||
set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
|
||||
cond_wake_up(&entry->wait);
|
||||
refcount_inc(&entry->refs);
|
||||
trace_btrfs_ordered_extent_mark_finished(inode, entry);
|
||||
spin_unlock_irqrestore(&tree->lock, flags);
|
||||
btrfs_init_work(&entry->work, finish_func, NULL, NULL);
|
||||
btrfs_queue_work(wq, &entry->work);
|
||||
|
@ -473,6 +474,7 @@ out:
|
|||
if (finished && cached && entry) {
|
||||
*cached = entry;
|
||||
refcount_inc(&entry->refs);
|
||||
trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
|
||||
}
|
||||
spin_unlock_irqrestore(&tree->lock, flags);
|
||||
return finished;
|
||||
|
@ -807,8 +809,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
|
|||
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
|
||||
if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
|
||||
entry = NULL;
|
||||
if (entry)
|
||||
if (entry) {
|
||||
refcount_inc(&entry->refs);
|
||||
trace_btrfs_ordered_extent_lookup(inode, entry);
|
||||
}
|
||||
out:
|
||||
spin_unlock_irqrestore(&tree->lock, flags);
|
||||
return entry;
|
||||
|
@ -848,8 +852,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
|
|||
break;
|
||||
}
|
||||
out:
|
||||
if (entry)
|
||||
if (entry) {
|
||||
refcount_inc(&entry->refs);
|
||||
trace_btrfs_ordered_extent_lookup_range(inode, entry);
|
||||
}
|
||||
spin_unlock_irq(&tree->lock);
|
||||
return entry;
|
||||
}
|
||||
|
@ -878,6 +884,7 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
|
|||
ASSERT(list_empty(&ordered->log_list));
|
||||
list_add_tail(&ordered->log_list, list);
|
||||
refcount_inc(&ordered->refs);
|
||||
trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
|
||||
}
|
||||
spin_unlock_irq(&tree->lock);
|
||||
}
|
||||
|
@ -901,6 +908,7 @@ btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
|
|||
|
||||
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
|
||||
refcount_inc(&entry->refs);
|
||||
trace_btrfs_ordered_extent_lookup_first(inode, entry);
|
||||
out:
|
||||
spin_unlock_irq(&tree->lock);
|
||||
return entry;
|
||||
|
@ -975,8 +983,11 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
|
|||
/* No ordered extent in the range */
|
||||
entry = NULL;
|
||||
out:
|
||||
if (entry)
|
||||
if (entry) {
|
||||
refcount_inc(&entry->refs);
|
||||
trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
|
||||
}
|
||||
|
||||
spin_unlock_irq(&tree->lock);
|
||||
return entry;
|
||||
}
|
||||
|
@ -1055,6 +1066,8 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
|
|||
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
|
||||
int ret = 0;
|
||||
|
||||
trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);
|
||||
|
||||
spin_lock_irq(&tree->lock);
|
||||
/* Remove from tree once */
|
||||
node = &ordered->rb_node;
|
||||
|
|
|
@ -598,6 +598,70 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
|
|||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup,
|
||||
|
||||
TP_PROTO(const struct btrfs_inode *inode,
|
||||
const struct btrfs_ordered_extent *ordered),
|
||||
|
||||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_range,
|
||||
|
||||
TP_PROTO(const struct btrfs_inode *inode,
|
||||
const struct btrfs_ordered_extent *ordered),
|
||||
|
||||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first_range,
|
||||
|
||||
TP_PROTO(const struct btrfs_inode *inode,
|
||||
const struct btrfs_ordered_extent *ordered),
|
||||
|
||||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_for_logging,
|
||||
|
||||
TP_PROTO(const struct btrfs_inode *inode,
|
||||
const struct btrfs_ordered_extent *ordered),
|
||||
|
||||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first,
|
||||
|
||||
TP_PROTO(const struct btrfs_inode *inode,
|
||||
const struct btrfs_ordered_extent *ordered),
|
||||
|
||||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_split,
|
||||
|
||||
TP_PROTO(const struct btrfs_inode *inode,
|
||||
const struct btrfs_ordered_extent *ordered),
|
||||
|
||||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_dec_test_pending,
|
||||
|
||||
TP_PROTO(const struct btrfs_inode *inode,
|
||||
const struct btrfs_ordered_extent *ordered),
|
||||
|
||||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_mark_finished,
|
||||
|
||||
TP_PROTO(const struct btrfs_inode *inode,
|
||||
const struct btrfs_ordered_extent *ordered),
|
||||
|
||||
TP_ARGS(inode, ordered)
|
||||
);
|
||||
|
||||
DECLARE_EVENT_CLASS(btrfs__writepage,
|
||||
|
||||
TP_PROTO(const struct page *page, const struct inode *inode,
|
||||
|
|
Loading…
Reference in a new issue