author     Johannes Thumshirn <johannes.thumshirn@wdc.com>  2022-06-09 19:28:04 +0300
committer  David Sterba <dsterba@suse.com>  2022-07-25 18:45:34 +0300
commit     5bea2508811ec76105b01c90c1f1661024c257a9
tree       24e39c815ce0a5a4cb96a7640ea0706e190622c6
parent     15dcccdb8b48fa3514c038bcaf0fa455fa6d89a0
download   linux-5bea2508811ec76105b01c90c1f1661024c257a9.tar.xz
btrfs: add tracepoints for ordered extents
When debugging a reference counting issue with ordered extents, I've found
we're lacking a lot of tracepoint coverage in the ordered extent code.

Close these gaps by adding tracepoints after every refcount_inc() in the
ordered extent code.

Reviewed-by: Boris Burkov <boris@bur.io>
Reviewed-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
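Each of the new events reuses the existing btrfs__ordered_extent event class, so
they can be enabled and read like any other btrfs tracepoint. A minimal userspace
sketch of consuming one of them through tracefs follows; the tracefs mount point
(/sys/kernel/tracing here, /sys/kernel/debug/tracing on older setups) and the
need for root privileges are assumptions about the local system, not part of
this patch.

/*
 * Minimal sketch: enable one of the new ordered-extent tracepoints and
 * stream the resulting events from tracefs.  Assumes tracefs is mounted
 * at /sys/kernel/tracing and the program runs with enough privileges.
 */
#include <stdio.h>
#include <stdlib.h>

#define TRACEFS "/sys/kernel/tracing"

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	char line[4096];
	FILE *pipe;

	/* Event name comes from the DEFINE_EVENT() added by this patch. */
	if (write_str(TRACEFS "/events/btrfs/btrfs_ordered_extent_lookup/enable", "1")) {
		perror("enable tracepoint");
		return EXIT_FAILURE;
	}

	/* trace_pipe blocks until events arrive, then streams them. */
	pipe = fopen(TRACEFS "/trace_pipe", "r");
	if (!pipe) {
		perror("open trace_pipe");
		return EXIT_FAILURE;
	}

	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);

	fclose(pipe);
	return EXIT_SUCCESS;
}

With the program running, any btrfs workload that goes through
btrfs_lookup_ordered_extent() shows up as btrfs_ordered_extent_lookup lines
in the output.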
-rw-r--r--  fs/btrfs/ordered-data.c        19
-rw-r--r--  include/trace/events/btrfs.h   64
2 files changed, 80 insertions, 3 deletions
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index dc88d2b3721f..41b3bc44c92b 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -401,6 +401,7 @@ void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
cond_wake_up(&entry->wait);
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_mark_finished(inode, entry);
spin_unlock_irqrestore(&tree->lock, flags);
btrfs_init_work(&entry->work, finish_func, NULL, NULL);
btrfs_queue_work(wq, &entry->work);
@@ -473,6 +474,7 @@ out:
if (finished && cached && entry) {
*cached = entry;
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
}
spin_unlock_irqrestore(&tree->lock, flags);
return finished;
@@ -807,8 +809,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *ino
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
entry = NULL;
- if (entry)
+ if (entry) {
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_lookup(inode, entry);
+ }
out:
spin_unlock_irqrestore(&tree->lock, flags);
return entry;
@@ -848,8 +852,10 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
break;
}
out:
- if (entry)
+ if (entry) {
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_lookup_range(inode, entry);
+ }
spin_unlock_irq(&tree->lock);
return entry;
}
@@ -878,6 +884,7 @@ void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
ASSERT(list_empty(&ordered->log_list));
list_add_tail(&ordered->log_list, list);
refcount_inc(&ordered->refs);
+ trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
}
spin_unlock_irq(&tree->lock);
}
@@ -901,6 +908,7 @@ btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
spin_unlock_irq(&tree->lock);
return entry;
@@ -975,8 +983,11 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
/* No ordered extent in the range */
entry = NULL;
out:
- if (entry)
+ if (entry) {
refcount_inc(&entry->refs);
+ trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
+ }
+
spin_unlock_irq(&tree->lock);
return entry;
}
@@ -1055,6 +1066,8 @@ int btrfs_split_ordered_extent(struct btrfs_ordered_extent *ordered, u64 pre,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
int ret = 0;
+ trace_btrfs_ordered_extent_split(BTRFS_I(inode), ordered);
+
spin_lock_irq(&tree->lock);
/* Remove from tree once */
node = &ordered->rb_node;
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 29fa8ea2cc0f..73df80d462dc 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -598,6 +598,70 @@ DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_put,
TP_ARGS(inode, ordered)
);
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_range,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first_range,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_for_logging,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_lookup_first,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_split,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_dec_test_pending,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
+DEFINE_EVENT(btrfs__ordered_extent, btrfs_ordered_extent_mark_finished,
+
+ TP_PROTO(const struct btrfs_inode *inode,
+ const struct btrfs_ordered_extent *ordered),
+
+ TP_ARGS(inode, ordered)
+);
+
DECLARE_EVENT_CLASS(btrfs__writepage,
TP_PROTO(const struct page *page, const struct inode *inode,