author	Kent Overstreet <kent.overstreet@linux.dev>	2022-11-24 02:23:48 +0300
committer	Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:09:47 +0300
commit	30c92ffe4752b10059cfe00cea775d4af2f5196c (patch)
tree	f5c9007da535a9fa8cf53d3e0f0ed8bb417d787b /fs/bcachefs/util.c
parent	c96f108b053b394d622f56f2bcefeccb32d0394c (diff)
download	linux-30c92ffe4752b10059cfe00cea775d4af2f5196c.tar.xz
bcachefs: Better inlining in bch2_time_stats_update()
Move the actual slowpath off into a new function - bch2_time_stats_clear_buffer() - and inline bch2_time_stats_update_one().

Also, use the new inlined update functions from mean_and_variance.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
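The change follows a common hot-path idiom: keep the per-sample update small and marked inline, and push the rare buffer-flush work into a separate noinline function so the flush code and its stack frame stay out of the caller's fast path. Below is a minimal, self-contained userspace sketch of that split; the names here (time_stats, stat_buffer, stats_update, etc.) are simplified stand-ins, a pthread mutex replaces the kernel spinlock, and __builtin_expect stands in for unlikely(), so this illustrates the pattern rather than the bcachefs implementation.

/*
 * Sketch of the fastpath/slowpath split, under the assumptions above.
 * Compile with: cc -O2 -pthread sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BUF_ENTRIES 32

struct stat_entry { uint64_t start, end; };

struct time_stats {
	pthread_mutex_t	lock;
	uint64_t	count;
	uint64_t	total_duration;
};

struct stat_buffer {
	unsigned		nr;
	struct stat_entry	entries[BUF_ENTRIES];
};

/* Hot path: cheap per-sample update, asked to be inlined. */
static inline void stats_update_one(struct time_stats *s,
				    uint64_t start, uint64_t end)
{
	s->count++;
	s->total_duration += end - start;
}

/*
 * Cold path: flush the buffered samples under the lock. noinline keeps
 * this code out of the caller's inlined fast path. Unlike the kernel
 * version, this flushes partial buffers too (it loops up to b->nr).
 */
__attribute__((noinline))
static void stats_clear_buffer(struct time_stats *s, struct stat_buffer *b)
{
	pthread_mutex_lock(&s->lock);
	for (struct stat_entry *i = b->entries; i < b->entries + b->nr; i++)
		stats_update_one(s, i->start, i->end);
	pthread_mutex_unlock(&s->lock);

	b->nr = 0;
}

/* Callers buffer samples and only take the lock when the buffer fills. */
static void stats_update(struct time_stats *s, struct stat_buffer *b,
			 uint64_t start, uint64_t end)
{
	b->entries[b->nr++] = (struct stat_entry) { .start = start, .end = end };

	/* Hint that the buffer is almost never full, like unlikely(). */
	if (__builtin_expect(b->nr == BUF_ENTRIES, 0))
		stats_clear_buffer(s, b);
}

int main(void)
{
	struct time_stats s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct stat_buffer b = { 0 };

	for (uint64_t t = 0; t < 100; t++)
		stats_update(&s, &b, t, t + 3);

	stats_clear_buffer(&s, &b);	/* flush the partially filled tail */
	printf("count=%llu total=%llu\n",
	       (unsigned long long)s.count,
	       (unsigned long long)s.total_duration);
	return 0;
}

The design point is the same one the commit makes: the branch that detects a full buffer is taken once every BUF_ENTRIES samples, so moving its body out of line shrinks the code the compiler inlines at every call site.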
Diffstat (limited to 'fs/bcachefs/util.c')
-rw-r--r--	fs/bcachefs/util.c	34
1 file changed, 20 insertions(+), 14 deletions(-)
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 8b2eef24498e..31934f7a6436 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -319,8 +319,8 @@ static void bch2_quantiles_update(struct bch2_quantiles *q, u64 v)
 	}
 }
 
-static void bch2_time_stats_update_one(struct bch2_time_stats *stats,
-				       u64 start, u64 end)
+static inline void bch2_time_stats_update_one(struct bch2_time_stats *stats,
+					      u64 start, u64 end)
 {
 	u64 duration, freq;
 
@@ -343,6 +343,22 @@ static void bch2_time_stats_update_one(struct bch2_time_stats *stats,
 	}
 }
 
+static noinline void bch2_time_stats_clear_buffer(struct bch2_time_stats *stats,
+						  struct bch2_time_stat_buffer *b)
+{
+	struct bch2_time_stat_buffer_entry *i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&stats->lock, flags);
+	for (i = b->entries;
+	     i < b->entries + ARRAY_SIZE(b->entries);
+	     i++)
+		bch2_time_stats_update_one(stats, i->start, i->end);
+	spin_unlock_irqrestore(&stats->lock, flags);
+
+	b->nr = 0;
+}
+
 void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
 {
 	unsigned long flags;
@@ -362,7 +378,6 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
 					    GFP_ATOMIC);
 		spin_unlock_irqrestore(&stats->lock, flags);
 	} else {
-		struct bch2_time_stat_buffer_entry *i;
 		struct bch2_time_stat_buffer *b;
 
 		preempt_disable();
@@ -374,17 +389,8 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
 			.end	= end
 		};
 
-		if (b->nr == ARRAY_SIZE(b->entries)) {
-			spin_lock_irqsave(&stats->lock, flags);
-			for (i = b->entries;
-			     i < b->entries + ARRAY_SIZE(b->entries);
-			     i++)
-				bch2_time_stats_update_one(stats, i->start, i->end);
-			spin_unlock_irqrestore(&stats->lock, flags);
-
-			b->nr = 0;
-		}
-
+		if (unlikely(b->nr == ARRAY_SIZE(b->entries)))
+			bch2_time_stats_clear_buffer(stats, b);
 		preempt_enable();
 	}
 }