author:    Kent Overstreet <kent.overstreet@linux.dev>	2022-08-27 19:48:36 +0300
committer: Kent Overstreet <kent.overstreet@linux.dev>	2023-10-23 00:09:39 +0300
commit:    674cfc26240b7807f078a23a4f04681ccae49b02 (patch)
tree:      16927f0b64915bb2aa8f3e4ff441712dfd783a8b
parent:    d97e6aaed60a9c2c727cce2979ca311fe232163f (diff)
download:  linux-674cfc26240b7807f078a23a4f04681ccae49b02.tar.xz
bcachefs: Add persistent counters for all tracepoints
Also, do some reorganizing/renaming, convert atomic counters in bch_fs
to persistent counters, and add a few missing counters.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
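The heart of the change is the trace_and_count() helper added to bcachefs.h below: it bumps the per-cpu persistent counter that shares a name with a tracepoint, then fires the tracepoint itself. As a rough illustration of the pattern, here is a minimal userspace sketch (a plain array stands in for the kernel's per-cpu counters, printf() stands in for a real tracepoint, and all names in it are illustrative only):

```c
#include <stdio.h>

/* X-macro list: one entry per counter, with an explicit, stable index. */
#define COUNTERS()				\
	x(bucket_invalidate,	3)		\
	x(bucket_alloc_fail,	6)

enum counter_id {
#define x(t, n, ...) COUNTER_##t = (n),
	COUNTERS()
#undef x
};

/* Plain array standing in for the kernel's u64 __percpu *counters. */
static unsigned long counters[64];

/* Stand-in for the kernel macro: bump the counter, then "trace". */
#define trace_and_count(_name, fmt, ...)				\
do {									\
	counters[COUNTER_##_name]++;	/* this_cpu_inc() in the kernel */ \
	printf("trace_" #_name ": " fmt "\n", __VA_ARGS__);		\
} while (0)

int main(void)
{
	trace_and_count(bucket_invalidate, "dev %u bucket %llu", 0u, 42ULL);
	printf("bucket_invalidate count = %lu\n",
	       counters[COUNTER_bucket_invalidate]);
	return 0;
}
```

The x-macro trick is the same one BCH_PERSISTENT_COUNTERS() uses in the patch: a single list generates both the enum of counter indices and anything else keyed by counter name (sysfs attributes, for instance).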
-rw-r--r--  fs/bcachefs/alloc_background.c       |   3
-rw-r--r--  fs/bcachefs/alloc_foreground.c       |  52
-rw-r--r--  fs/bcachefs/bcachefs.h               |  12
-rw-r--r--  fs/bcachefs/bcachefs_format.h        |  81
-rw-r--r--  fs/bcachefs/btree_cache.c            |  22
-rw-r--r--  fs/bcachefs/btree_gc.c               |   4
-rw-r--r--  fs/bcachefs/btree_io.c               |   4
-rw-r--r--  fs/bcachefs/btree_iter.c             |   8
-rw-r--r--  fs/bcachefs/btree_iter.h             |   2
-rw-r--r--  fs/bcachefs/btree_key_cache.c        |   4
-rw-r--r--  fs/bcachefs/btree_locking.c          |  12
-rw-r--r--  fs/bcachefs/btree_update_interior.c  |  21
-rw-r--r--  fs/bcachefs/btree_update_leaf.c      |  24
-rw-r--r--  fs/bcachefs/data_update.c            |  13
-rw-r--r--  fs/bcachefs/io.c                     |  10
-rw-r--r--  fs/bcachefs/journal.c                |   4
-rw-r--r--  fs/bcachefs/journal_io.c             |   2
-rw-r--r--  fs/bcachefs/journal_reclaim.c        |   5
-rw-r--r--  fs/bcachefs/move.c                   |   6
-rw-r--r--  fs/bcachefs/movinggc.c               |   4
-rw-r--r--  fs/bcachefs/super-io.c               |   2
-rw-r--r--  fs/bcachefs/sysfs.c                  |  19
-rw-r--r--  fs/bcachefs/trace.h                  | 182

23 files changed, 279 insertions(+), 217 deletions(-)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 15c3c9a2da7b..ffcfb9f1916e 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -1134,8 +1134,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 	if (ret)
 		goto out;
 
-	trace_invalidate_bucket(c, bucket.inode, bucket.offset, cached_sectors);
-	this_cpu_inc(c->counters[BCH_COUNTER_bucket_invalidate]);
+	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
 	--*nr_to_invalidate;
 out:
 	bch2_trans_iter_exit(trans, &alloc_iter);
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index bbe74a05a7a2..f60fe159916e 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -584,32 +584,32 @@ err:
 	if (!ob)
 		ob = ERR_PTR(-BCH_ERR_no_buckets_found);
 
-	if (!IS_ERR(ob)) {
-		trace_bucket_alloc(ca, bch2_alloc_reserves[reserve],
-				   usage.d[BCH_DATA_free].buckets,
-				   avail,
-				   bch2_copygc_wait_amount(c),
-				   c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
-				   buckets_seen,
-				   skipped_open,
-				   skipped_need_journal_commit,
-				   skipped_nouse,
-				   cl == NULL,
-				   "");
-	} else {
-		trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve],
-				   usage.d[BCH_DATA_free].buckets,
-				   avail,
-				   bch2_copygc_wait_amount(c),
-				   c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
-				   buckets_seen,
-				   skipped_open,
-				   skipped_need_journal_commit,
-				   skipped_nouse,
-				   cl == NULL,
-				   bch2_err_str(PTR_ERR(ob)));
-		atomic_long_inc(&c->bucket_alloc_fail);
-	}
+	if (!IS_ERR(ob))
+		trace_and_count(c, bucket_alloc, ca,
+				bch2_alloc_reserves[reserve],
+				usage.d[BCH_DATA_free].buckets,
+				avail,
+				bch2_copygc_wait_amount(c),
+				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+				buckets_seen,
+				skipped_open,
+				skipped_need_journal_commit,
+				skipped_nouse,
+				cl == NULL,
+				"");
+	else
+		trace_and_count(c, bucket_alloc_fail, ca,
+				bch2_alloc_reserves[reserve],
+				usage.d[BCH_DATA_free].buckets,
+				avail,
+				bch2_copygc_wait_amount(c),
+				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+				buckets_seen,
+				skipped_open,
+				skipped_need_journal_commit,
+				skipped_nouse,
+				cl == NULL,
+				bch2_err_str(PTR_ERR(ob)));
 
 	return ob;
 }
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 9e6c10dfa443..bca61af71652 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -212,6 +212,12 @@
 #define dynamic_fault(...)		0
 #define race_fault(...)			0
 
+#define trace_and_count(_c, _name, ...)					\
+do {									\
+	this_cpu_inc((_c)->counters[BCH_COUNTER_##_name]);		\
+	trace_##_name(__VA_ARGS__);					\
+} while (0)
+
 #define bch2_fs_init_fault(name)					\
 	dynamic_fault("bcachefs:bch_fs_init:" name)
 #define bch2_meta_read_fault(name)					\
@@ -916,12 +922,6 @@
 	mempool_t		bio_bounce_pages;
 	u64			last_bucket_seq_cleanup;
 
-	/* TODO rewrite as counters - The rest of this all shows up in sysfs */
-	atomic_long_t		read_realloc_races;
-	atomic_long_t		extent_migrate_done;
-	atomic_long_t		extent_migrate_raced;
-	atomic_long_t		bucket_alloc_fail;
-
 	u64			counters_on_mount[BCH_COUNTER_NR];
 	u64 __percpu		*counters;
 
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index b9d614f608b5..0e80fe2568f2 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -1326,12 +1326,81 @@ struct bch_sb_field_disk_groups {
 
 /* BCH_SB_FIELD_counters */
 
-#define BCH_PERSISTENT_COUNTERS()		\
-	x(io_read,		0)		\
-	x(io_write,		1)		\
-	x(io_move,		2)		\
-	x(bucket_invalidate,	3)		\
-	x(bucket_discard,	4)
+#define BCH_PERSISTENT_COUNTERS()				\
+	x(io_read,					0)	\
+	x(io_write,					1)	\
+	x(io_move,					2)	\
+	x(bucket_invalidate,				3)	\
+	x(bucket_discard,				4)	\
+	x(bucket_alloc,					5)	\
+	x(bucket_alloc_fail,				6)	\
+	x(btree_cache_scan,				7)	\
+	x(btree_cache_reap,				8)	\
+	x(btree_cache_cannibalize,			9)	\
+	x(btree_cache_cannibalize_lock,			10)	\
+	x(btree_cache_cannibalize_lock_fail,		11)	\
+	x(btree_cache_cannibalize_unlock,		12)	\
+	x(btree_node_write,				13)	\
+	x(btree_node_read,				14)	\
+	x(btree_node_compact,				15)	\
+	x(btree_node_merge,				16)	\
+	x(btree_node_split,				17)	\
+	x(btree_node_rewrite,				18)	\
+	x(btree_node_alloc,				19)	\
+	x(btree_node_free,				20)	\
+	x(btree_node_set_root,				21)	\
+	x(btree_path_relock_fail,			22)	\
+	x(btree_path_upgrade_fail,			23)	\
+	x(btree_reserve_get_fail,			24)	\
+	x(journal_entry_full,				25)	\
+	x(journal_full,					26)	\
+	x(journal_reclaim_finish,			27)	\
+	x(journal_reclaim_start,			28)	\
+	x(journal_write,				29)	\
+	x(read_promote,					30)	\
+	x(read_bounce,					31)	\
+	x(read_split,					33)	\
+	x(read_retry,					32)	\
+	x(read_reuse_race,				34)	\
+	x(move_extent_read,				35)	\
+	x(move_extent_write,				36)	\
+	x(move_extent_finish,				37)	\
+	x(move_extent_fail,				38)	\
+	x(move_extent_alloc_mem_fail,			39)	\
+	x(copygc,					40)	\
+	x(copygc_wait,					41)	\
+	x(gc_gens_end,					42)	\
+	x(gc_gens_start,				43)	\
+	x(trans_blocked_journal_reclaim,		44)	\
+	x(trans_restart_btree_node_reused,		45)	\
+	x(trans_restart_btree_node_split,		46)	\
+	x(trans_restart_fault_inject,			47)	\
+	x(trans_restart_iter_upgrade,			48)	\
+	x(trans_restart_journal_preres_get,		49)	\
+	x(trans_restart_journal_reclaim,		50)	\
+	x(trans_restart_journal_res_get,		51)	\
+	x(trans_restart_key_cache_key_realloced,	52)	\
+	x(trans_restart_key_cache_raced,		53)	\
+	x(trans_restart_mark_replicas,			54)	\
+	x(trans_restart_mem_realloced,			55)	\
+	x(trans_restart_memory_allocation_failure,	56)	\
+	x(trans_restart_relock,				57)	\
+	x(trans_restart_relock_after_fill,		58)	\
+	x(trans_restart_relock_key_cache_fill,		59)	\
+	x(trans_restart_relock_next_node,		60)	\
+	x(trans_restart_relock_parent_for_fill,		61)	\
+	x(trans_restart_relock_path,			62)	\
+	x(trans_restart_relock_path_intent,		63)	\
+	x(trans_restart_too_many_iters,			64)	\
+	x(trans_restart_traverse,			65)	\
+	x(trans_restart_upgrade,			66)	\
+	x(trans_restart_would_deadlock,			67)	\
+	x(trans_restart_would_deadlock_write,		68)	\
+	x(trans_restart_injected,			69)	\
+	x(trans_restart_key_cache_upgrade,		70)	\
+	x(trans_traverse_all,				71)	\
+	x(transaction_commit,				72)	\
+	x(write_super,					73)
 
 enum bch_persistent_counters {
 #define x(t, n, ...) BCH_COUNTER_##t,
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 7ffa88b74236..e09fbf36ebc2 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -253,7 +253,7 @@ wait_on_io:
 	}
 out:
 	if (b->hash_val && !ret)
-		trace_btree_node_reap(c, b);
+		trace_and_count(c, btree_cache_reap, c, b);
 	return ret;
 out_unlock:
 	six_unlock_write(&b->c.lock);
@@ -377,7 +377,7 @@ out:
 	ret = freed;
 	memalloc_nofs_restore(flags);
 out_norestore:
-	trace_btree_cache_scan(sc->nr_to_scan, can_free, ret);
+	trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);
 	return ret;
 }
 
@@ -504,7 +504,7 @@ void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c)
 	struct btree_cache *bc = &c->btree_cache;
 
 	if (bc->alloc_lock == current) {
-		trace_btree_node_cannibalize_unlock(c);
+		trace_and_count(c, btree_cache_cannibalize_unlock, c);
 		bc->alloc_lock = NULL;
 		closure_wake_up(&bc->alloc_wait);
 	}
@@ -520,7 +520,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 		goto success;
 
 	if (!cl) {
-		trace_btree_node_cannibalize_lock_fail(c);
+		trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
 		return -ENOMEM;
 	}
 
@@ -534,11 +534,11 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 		goto success;
 	}
 
-	trace_btree_node_cannibalize_lock_fail(c);
+	trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
 	return -EAGAIN;
 
 success:
-	trace_btree_node_cannibalize_lock(c);
+	trace_and_count(c, btree_cache_cannibalize_lock, c);
 	return 0;
 }
 
@@ -662,7 +662,7 @@ err_locked:
 
 		mutex_unlock(&bc->lock);
 
-		trace_btree_node_cannibalize(c);
+		trace_and_count(c, btree_cache_cannibalize, c);
 		goto out;
 	}
 
@@ -691,7 +691,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	 * been freed:
 	 */
 	if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
-		trace_trans_restart_relock_parent_for_fill(trans, _THIS_IP_, path);
+		trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
 	}
 
@@ -699,7 +699,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 
 	if (trans && b == ERR_PTR(-ENOMEM)) {
 		trans->memory_allocation_failure = true;
-		trace_trans_restart_memory_allocation_failure(trans, _THIS_IP_, path);
+		trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
 	}
 
@@ -748,7 +748,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 
 	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
 		if (trans)
-			trace_trans_restart_relock_after_fill(trans, _THIS_IP_, path);
+			trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
 	}
 
@@ -903,7 +903,7 @@ lock_node:
 			if (bch2_btree_node_relock(trans, path, level + 1))
 				goto retry;
 
-			trace_trans_restart_btree_node_reused(trans, trace_ip, path);
+			trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
 			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
 		}
 	}
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 4ab59880781a..239eda57bf02 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -1931,7 +1931,7 @@ int bch2_gc_gens(struct bch_fs *c)
 	if (!mutex_trylock(&c->gc_gens_lock))
 		return 0;
 
-	trace_gc_gens_start(c);
+	trace_and_count(c, gc_gens_start, c);
 	down_read(&c->gc_lock);
 	bch2_trans_init(&trans, c, 0, 0);
 
@@ -1992,7 +1992,7 @@ int bch2_gc_gens(struct bch_fs *c)
 	c->gc_count++;
 
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
-	trace_gc_gens_end(c);
+	trace_and_count(c, gc_gens_end, c);
 err:
 	for_each_member_device(ca, c, i) {
 		kvfree(ca->oldest_gen);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index bd74bd31dd1f..b3dc8b43298e 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1485,7 +1485,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	struct bio *bio;
 	int ret;
 
-	trace_btree_read(c, b);
+	trace_and_count(c, btree_node_read, c, b);
 
 	if (bch2_verify_all_btree_replicas &&
 	    !btree_node_read_all_replicas(c, b, sync))
@@ -1974,7 +1974,7 @@ do_write:
 	    c->opts.nochanges)
 		goto err;
 
-	trace_btree_write(b, bytes_to_write, sectors_to_write);
+	trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
 
 	wbio = container_of(bio_alloc_bioset(NULL,
 				buf_pages(data, sectors_to_write << 9),
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 99422e29c704..e76907af09f1 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1072,7 +1072,7 @@ err:
 
 	trans->in_traverse_all = false;
 
-	trace_trans_traverse_all(trans, trace_ip);
+	trace_and_count(c, trans_traverse_all, trans, trace_ip);
 	return ret;
 }
 
@@ -1209,7 +1209,7 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
 		u64 max = ~(~0ULL << restart_probability_bits);
 
 		if (!get_random_u32_below(max)) {
-			trace_transaction_restart_injected(trans, _RET_IP_);
+			trace_and_count(trans->c, trans_restart_injected, trans, _RET_IP_);
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);
 		}
 	}
@@ -1728,7 +1728,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 			path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
 			path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
 			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-			trace_trans_restart_relock_next_node(trans, _THIS_IP_, path);
+			trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 			goto err;
 		}
@@ -2773,7 +2773,7 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 		trans->mem_bytes = new_bytes;
 
 		if (old_bytes) {
-			trace_trans_restart_mem_realloced(trans, _RET_IP_, new_bytes);
+			trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
 			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
 		}
 	}
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 1081ea753be6..bdc703324b9a 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -388,7 +388,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
 static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
 	if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX - 8) {
-		trace_trans_restart_too_many_iters(trans, _THIS_IP_);
+		trace_and_count(trans->c, trans_restart_too_many_iters, trans, _THIS_IP_);
 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
 	}
 
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index cf41926b7f8e..127cb6edaff5 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -291,7 +291,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 	k = bch2_btree_path_peek_slot(path, &u);
 
 	if (!bch2_btree_node_relock(trans, ck_path, 0)) {
-		trace_trans_restart_relock_key_cache_fill(trans, _THIS_IP_, ck_path);
+		trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 		goto err;
 	}
@@ -414,7 +414,7 @@ fill:
 		 */
 		if (!path->locks_want &&
 		    !__bch2_btree_path_upgrade(trans, path, 1)) {
-			trace_transaction_restart_key_cache_upgrade(trans, _THIS_IP_);
+			trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
 			goto err;
 		}
diff --git a/fs/bcachefs/btree_locking.c b/fs/bcachefs/btree_locking.c
index 76d99c694948..301311763d59 100644
--- a/fs/bcachefs/btree_locking.c
+++ b/fs/bcachefs/btree_locking.c
@@ -152,7 +152,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
 	return btree_node_lock_type(trans, path, b, pos, level,
 				    type, should_sleep_fn, p);
 deadlock:
-	trace_trans_restart_would_deadlock(trans, ip, reason, linked, path, &pos);
+	trace_and_count(trans->c, trans_restart_would_deadlock, trans, ip, reason, linked, path, &pos);
 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 }
 
@@ -218,7 +218,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
 		return true;
 	}
 fail:
-	trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
+	trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
 	return false;
 }
 
@@ -262,7 +262,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 		goto success;
 	}
 
-	trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
+	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
 	return false;
 success:
 	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
@@ -285,7 +285,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans,
 		if (!bch2_btree_node_relock(trans, path, l)) {
 			__bch2_btree_path_unlock(trans, path);
 			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-			trace_trans_restart_relock_path_intent(trans, _RET_IP_, path);
+			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
 		}
 	}
@@ -304,7 +304,7 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
 			struct btree_path *path, unsigned long trace_ip)
 {
 	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
-		trace_trans_restart_relock_path(trans, trace_ip, path);
+		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
 	}
 
@@ -416,7 +416,7 @@ int bch2_trans_relock(struct btree_trans *trans)
 	trans_for_each_path(trans, path)
 		if (path->should_be_locked &&
 		    bch2_btree_path_relock(trans, path, _RET_IP_)) {
-			trace_trans_restart_relock(trans, _RET_IP_, path);
+			trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
 			BUG_ON(!trans->restarted);
 			return -BCH_ERR_transaction_restart_relock;
 		}
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index dd9405c631f5..1f5b98a3d0a2 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -143,7 +143,7 @@ bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
 
 static void __btree_node_free(struct bch_fs *c, struct btree *b)
 {
-	trace_btree_node_free(c, b);
+	trace_and_count(c, btree_node_free, c, b);
 
 	BUG_ON(btree_node_dirty(b));
 	BUG_ON(btree_node_need_write(b));
@@ -305,7 +305,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
 	ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
 	BUG_ON(ret);
 
-	trace_btree_node_alloc(c, b);
+	trace_and_count(c, btree_node_alloc, c, b);
 	return b;
 }
 
@@ -995,7 +995,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 		nr_nodes[1] += 1;
 
 	if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) {
-		trace_trans_restart_iter_upgrade(trans, _RET_IP_, path);
+		trace_and_count(c, trans_restart_iter_upgrade, trans, _RET_IP_, path);
 		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 		return ERR_PTR(ret);
 	}
@@ -1058,7 +1058,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 					      BTREE_UPDATE_JOURNAL_RES,
 					      journal_flags);
 		if (ret) {
-			trace_trans_restart_journal_preres_get(trans, _RET_IP_, journal_flags);
+			trace_and_count(c, trans_restart_journal_preres_get, trans, _RET_IP_, journal_flags);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get);
 			goto err;
 		}
@@ -1091,8 +1091,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 	}
 
 	if (ret) {
-		trace_btree_reserve_get_fail(trans->fn, _RET_IP_,
-					     nr_nodes[0] + nr_nodes[1]);
+		trace_and_count(c, btree_reserve_get_fail, trans->fn, _RET_IP_, nr_nodes[0] + nr_nodes[1]);
 		goto err;
 	}
 
@@ -1147,7 +1146,7 @@ static void bch2_btree_set_root(struct btree_update *as,
 	struct bch_fs *c = as->c;
 	struct btree *old;
 
-	trace_btree_set_root(c, b);
+	trace_and_count(c, btree_node_set_root, c, b);
 	BUG_ON(!b->written);
 
 	old = btree_node_root(c, b);
@@ -1434,7 +1433,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
 	btree_split_insert_keys(as, trans, path, n1, keys);
 
 	if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
-		trace_btree_split(c, b);
+		trace_and_count(c, btree_node_split, c, b);
 
 		n2 = __btree_split_node(as, n1);
 
@@ -1468,7 +1467,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
 			bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
 		}
 	} else {
-		trace_btree_compact(c, b);
+		trace_and_count(c, btree_node_compact, c, b);
 
 		bch2_btree_build_aux_trees(n1);
 		six_unlock_write(&n1->c.lock);
@@ -1737,7 +1736,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 	if (ret)
 		goto err;
 
-	trace_btree_merge(c, b);
+	trace_and_count(c, btree_node_merge, c, b);
 
 	bch2_btree_interior_update_will_free_node(as, b);
 	bch2_btree_interior_update_will_free_node(as, m);
@@ -1829,7 +1828,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
 	bch2_btree_build_aux_trees(n);
 	six_unlock_write(&n->c.lock);
 
-	trace_btree_rewrite(c, b);
+	trace_and_count(c, btree_node_rewrite, c, b);
 
 	bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
 
diff --git a/fs/bcachefs/btree_update_leaf.c b/fs/bcachefs/btree_update_leaf.c
index e3501623931a..732d09d45041 100644
--- a/fs/bcachefs/btree_update_leaf.c
+++ b/fs/bcachefs/btree_update_leaf.c
@@ -285,7 +285,7 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s,
 
 	ret = bch2_trans_relock(trans);
 	if (ret) {
-		trace_trans_restart_journal_preres_get(trans, trace_ip, 0);
+		trace_and_count(c, trans_restart_journal_preres_get, trans, trace_ip, 0);
 		return ret;
 	}
 
@@ -375,7 +375,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 	 * Keys returned by peek() are no longer valid pointers, so we need a
 	 * transaction restart:
 	 */
-	trace_trans_restart_key_cache_key_realloced(trans, _RET_IP_, path, old_u64s, new_u64s);
+	trace_and_count(c, trans_restart_key_cache_key_realloced, trans, _RET_IP_, path, old_u64s, new_u64s);
 	return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_key_cache_realloced);
 }
 
@@ -567,7 +567,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
 	int ret;
 
 	if (race_fault()) {
-		trace_trans_restart_fault_inject(trans, trace_ip);
+		trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
 		return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
 	}
 
@@ -842,7 +842,7 @@ fail:
 		bch2_btree_node_unlock_write_inlined(trans, i->path, insert_l(i)->b);
 	}
 
-	trace_trans_restart_would_deadlock_write(trans);
+	trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
 }
 
@@ -975,7 +975,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 	case BTREE_INSERT_BTREE_NODE_FULL:
 		ret = bch2_btree_split_leaf(trans, i->path, trans->flags);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-			trace_trans_restart_btree_node_split(trans, trace_ip, i->path);
+			trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, i->path);
 		break;
 	case BTREE_INSERT_NEED_MARK_REPLICAS:
 		bch2_trans_unlock(trans);
@@ -986,7 +986,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 
 		ret = bch2_trans_relock(trans);
 		if (ret)
-			trace_trans_restart_mark_replicas(trans, trace_ip);
+			trace_and_count(c, trans_restart_mark_replicas, trans, trace_ip);
 		break;
 	case BTREE_INSERT_NEED_JOURNAL_RES:
 		bch2_trans_unlock(trans);
@@ -1003,12 +1003,12 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 
 		ret = bch2_trans_relock(trans);
 		if (ret)
-			trace_trans_restart_journal_res_get(trans, trace_ip);
+			trace_and_count(c, trans_restart_journal_res_get, trans, trace_ip);
 		break;
 	case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
 		bch2_trans_unlock(trans);
 
-		trace_trans_blocked_journal_reclaim(trans, trace_ip);
+		trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
 
 		wait_event_freezable(c->journal.reclaim_wait,
 				     (ret = journal_reclaim_wait_done(c)));
@@ -1017,7 +1017,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 
 		ret = bch2_trans_relock(trans);
 		if (ret)
-			trace_trans_restart_journal_reclaim(trans, trace_ip);
+			trace_and_count(c, trans_restart_journal_reclaim, trans, trace_ip);
 		break;
 	default:
 		BUG_ON(ret >= 0);
@@ -1120,7 +1120,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 		BUG_ON(!i->path->should_be_locked);
 
 		if (unlikely(!bch2_btree_path_upgrade(trans, i->path, i->level + 1))) {
-			trace_trans_restart_upgrade(trans, _RET_IP_, i->path);
+			trace_and_count(c, trans_restart_upgrade, trans, _RET_IP_, i->path);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 			goto out;
 		}
@@ -1166,7 +1166,7 @@ retry:
 	if (ret)
 		goto err;
 
-	trace_transaction_commit(trans, _RET_IP_);
+	trace_and_count(c, transaction_commit, trans, _RET_IP_);
 out:
 	bch2_journal_preres_put(&c->journal, &trans->journal_preres);
 
@@ -1642,7 +1642,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 		ck = (void *) iter->key_cache_path->l[0].b;
 
 		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
-			trace_trans_restart_key_cache_raced(trans, _RET_IP_);
+			trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 		}
 
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index f9eb147fe229..0b6f765bcad9 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -231,9 +231,12 @@ int bch2_data_update_index_update(struct bch_write_op *op)
 				m->data_opts.btree_insert_flags);
 		if (!ret) {
 			bch2_btree_iter_set_pos(&iter, next_pos);
-			atomic_long_inc(&c->extent_migrate_done);
+
 			if (ec_ob)
 				bch2_ob_add_backpointer(c, ec_ob, &insert->k);
+
+			this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
+			trace_move_extent_finish(&new->k);
 		}
 err:
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -248,16 +251,16 @@ next:
 		}
 		continue;
 nomatch:
-		trace_data_update_fail(&old.k->p);
-
 		if (m->ctxt) {
 			BUG_ON(k.k->p.offset <= iter.pos.offset);
 			atomic64_inc(&m->ctxt->stats->keys_raced);
 			atomic64_add(k.k->p.offset - iter.pos.offset,
 				     &m->ctxt->stats->sectors_raced);
 		}
-		atomic_long_inc(&c->extent_migrate_raced);
-		trace_move_race(&new->k);
+
+		this_cpu_add(c->counters[BCH_COUNTER_move_extent_fail], new->k.size);
+		trace_move_extent_fail(&new->k);
+
 		bch2_btree_iter_advance(&iter);
 		goto next;
 	}
diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c
index 44fb14a5b5ae..ed78cb8d90a2 100644
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1496,7 +1496,7 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
 {
 	struct bio *bio = &op->write.op.wbio.bio;
 
-	trace_promote(&rbio->bio);
+	trace_and_count(op->write.op.c, read_promote, &rbio->bio);
 
 	/* we now own pages: */
 	BUG_ON(!rbio->bounce);
@@ -1761,7 +1761,7 @@ static void bch2_rbio_retry(struct work_struct *work)
 	};
 	struct bch_io_failures failed = { .nr = 0 };
 
-	trace_read_retry(&rbio->bio);
+	trace_and_count(c, read_retry, &rbio->bio);
 
 	if (rbio->retry == READ_RETRY_AVOID)
 		bch2_mark_io_failure(&failed, &rbio->pick);
@@ -2017,7 +2017,7 @@ static void bch2_read_endio(struct bio *bio)
 
 	if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
 	    ptr_stale(ca, &rbio->pick.ptr)) {
-		atomic_long_inc(&c->read_realloc_races);
+		trace_and_count(c, read_reuse_race, &rbio->bio);
 
 		if (rbio->flags & BCH_READ_RETRY_IF_STALE)
 			bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
@@ -2305,7 +2305,7 @@ get_bio:
 	rbio->bio.bi_end_io	= bch2_read_endio;
 
 	if (rbio->bounce)
-		trace_read_bounce(&rbio->bio);
+		trace_and_count(c, read_bounce, &rbio->bio);
 
 	this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
 	bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
@@ -2320,7 +2320,7 @@ get_bio:
 
 	if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
 		bio_inc_remaining(&orig->bio);
-		trace_read_split(&orig->bio);
+		trace_and_count(c, read_split, &orig->bio);
 	}
 
 	if (!rbio->pick.idx) {
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index 26f60db751ca..9961cc674ad7 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -390,12 +390,12 @@ retry:
 
 	ret = journal_entry_open(j);
 	if (ret == JOURNAL_ERR_max_in_flight)
-		trace_journal_entry_full(c);
+		trace_and_count(c, journal_entry_full, c);
 unlock:
 	if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
 	    !j->res_get_blocked_start) {
 		j->res_get_blocked_start = local_clock() ?: 1;
-		trace_journal_full(c);
+		trace_and_count(c, journal_full, c);
 	}
 
 	can_discard = j->can_discard;
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index acb2005c3b72..090a718b917f 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -1552,7 +1552,7 @@ static void do_journal_write(struct closure *cl)
 
 		bch2_bio_map(bio, w->data, sectors << 9);
 
-		trace_journal_write(bio);
+		trace_and_count(c, journal_write, bio);
 		closure_bio_submit(bio, cl);
 
 		ca->journal.bucket_seq[ca->journal.cur_idx] =
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 00d9e3a8e526..a4f9d01d33cc 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -642,7 +642,8 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 
 		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
 
-		trace_journal_reclaim_start(c, direct, kicked,
+		trace_and_count(c, journal_reclaim_start, c,
+				direct, kicked,
 				min_nr, min_key_cache,
 				j->prereserved.reserved,
 				j->prereserved.remaining,
@@ -658,7 +659,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 			j->nr_direct_reclaim += nr_flushed;
 		else
 			j->nr_background_reclaim += nr_flushed;
-		trace_journal_reclaim_finish(c, nr_flushed);
+		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
 
 		if (nr_flushed)
 			wake_up(&j->reclaim_wait);
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index ea9ce6d436a2..0486c7e14c56 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -245,8 +245,8 @@ static int bch2_move_extent(struct btree_trans *trans,
 	atomic64_inc(&ctxt->stats->keys_moved);
 	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
 	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
-
-	trace_move_extent(k.k);
+	this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
+	trace_move_extent_read(k.k);
 
 	atomic_add(io->read_sectors, &ctxt->read_sectors);
 	list_add_tail(&io->list, &ctxt->reads);
@@ -268,7 +268,7 @@ err_free:
 	kfree(io);
 err:
 	percpu_ref_put(&c->writes);
-	trace_move_alloc_mem_fail(k.k);
+	trace_and_count(c, move_extent_alloc_mem_fail, k.k);
 	return ret;
 }
 
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 438ea22ad5bd..dca8d4a3a89c 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -339,7 +339,7 @@ static int bch2_copygc(struct bch_fs *c)
 			 atomic64_read(&move_stats.keys_raced),
 			 atomic64_read(&move_stats.sectors_raced));
 
-	trace_copygc(c,
+	trace_and_count(c, copygc, c,
 		     atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
 		     buckets_to_move, buckets_not_moved);
 	return 0;
@@ -397,7 +397,7 @@ static int bch2_copygc_thread(void *arg)
 		wait = bch2_copygc_wait_amount(c);
 
 		if (wait > clock->max_slop) {
-			trace_copygc_wait(c, wait, last + wait);
+			trace_and_count(c, copygc_wait, c, wait, last + wait);
 			c->copygc_wait = last + wait;
 			bch2_kthread_io_clock_wait(clock, last + wait,
 					MAX_SCHEDULE_TIMEOUT);
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 48ad158637e5..4953f54e94d6 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -801,7 +801,7 @@ int bch2_write_super(struct bch_fs *c)
 	unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
 	int ret = 0;
 
-	trace_write_super(c, _RET_IP_);
+	trace_and_count(c, write_super, c, _RET_IP_);
 
 	if (c->opts.very_degraded)
 		degraded_flags |= BCH_FORCE_IF_LOST;
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 4e2b6285cf3a..d10ac84c10ce 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -190,11 +190,6 @@ read_attribute(internal_uuid);
 read_attribute(has_data);
 read_attribute(alloc_debug);
 
-read_attribute(read_realloc_races);
-read_attribute(extent_migrate_done);
-read_attribute(extent_migrate_raced);
-read_attribute(bucket_alloc_fail);
-
 #define x(t, n, ...) read_attribute(t);
 BCH_PERSISTENT_COUNTERS()
 #undef x
@@ -378,15 +373,6 @@ SHOW(bch2_fs)
 	sysfs_hprint(btree_cache_size,	bch2_btree_cache_size(c));
 	sysfs_hprint(btree_avg_write_size, bch2_btree_avg_write_size(c));
 
-	sysfs_print(read_realloc_races,
-		    atomic_long_read(&c->read_realloc_races));
-	sysfs_print(extent_migrate_done,
-		    atomic_long_read(&c->extent_migrate_done));
-	sysfs_print(extent_migrate_raced,
-		    atomic_long_read(&c->extent_migrate_raced));
-	sysfs_print(bucket_alloc_fail,
-		    atomic_long_read(&c->bucket_alloc_fail));
-
 	sysfs_printf(btree_gc_periodic, "%u",	(int) c->btree_gc_periodic);
 
 	if (attr == &sysfs_gc_gens_pos)
@@ -629,11 +615,6 @@ struct attribute *bch2_fs_internal_files[] = {
 	&sysfs_trigger_invalidates,
 	&sysfs_prune_cache,
 
-	&sysfs_read_realloc_races,
-	&sysfs_extent_migrate_done,
-	&sysfs_extent_migrate_raced,
-	&sysfs_bucket_alloc_fail,
-
 	&sysfs_gc_gens_pos,
 
 	&sysfs_copy_gc_enabled,
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 2c1661ab807b..1ef99af5cd03 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -52,6 +52,31 @@ DECLARE_EVENT_CLASS(bkey,
 		  __entry->offset, __entry->size)
 );
 
+DECLARE_EVENT_CLASS(btree_node,
+	TP_PROTO(struct bch_fs *c, struct btree *b),
+	TP_ARGS(c, b),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__field(u8,		level			)
+		__field(u8,		btree_id		)
+		TRACE_BPOS_entries(pos)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= c->dev;
+		__entry->level		= b->c.level;
+		__entry->btree_id	= b->c.btree_id;
+		TRACE_BPOS_assign(pos, b->key.k.p);
+	),
+
+	TP_printk("%d,%d %u %s %llu:%llu:%u",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->level,
+		  bch2_btree_ids[__entry->btree_id],
+		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
+);
+
 DECLARE_EVENT_CLASS(bch_fs,
 	TP_PROTO(struct bch_fs *c),
 	TP_ARGS(c),
@@ -112,7 +137,7 @@ TRACE_EVENT(write_super,
 
 /* io.c: */
 
-DEFINE_EVENT(bio, read_split,
+DEFINE_EVENT(bio, read_promote,
 	TP_PROTO(struct bio *bio),
 	TP_ARGS(bio)
 );
@@ -122,12 +147,17 @@ DEFINE_EVENT(bio, read_bounce,
 	TP_ARGS(bio)
 );
 
+DEFINE_EVENT(bio, read_split,
+	TP_PROTO(struct bio *bio),
+	TP_ARGS(bio)
+);
+
 DEFINE_EVENT(bio, read_retry,
 	TP_PROTO(struct bio *bio),
 	TP_ARGS(bio)
 );
 
-DEFINE_EVENT(bio, promote,
+DEFINE_EVENT(bio, read_reuse_race,
 	TP_PROTO(struct bio *bio),
 	TP_ARGS(bio)
 );
@@ -220,8 +250,6 @@ TRACE_EVENT(journal_reclaim_finish,
 		  __entry->nr_flushed)
 );
 
-/* allocator: */
-
 /* bset.c: */
 
 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
@@ -229,39 +257,61 @@ DEFINE_EVENT(bpos, bkey_pack_pos_fail,
 	TP_ARGS(p)
 );
 
-/* Btree */
+/* Btree cache: */
 
-DECLARE_EVENT_CLASS(btree_node,
-	TP_PROTO(struct bch_fs *c, struct btree *b),
-	TP_ARGS(c, b),
+TRACE_EVENT(btree_cache_scan,
+	TP_PROTO(long nr_to_scan, long can_free, long ret),
+	TP_ARGS(nr_to_scan, can_free, ret),
 
 	TP_STRUCT__entry(
-		__field(dev_t,		dev			)
-		__field(u8,		level			)
-		__field(u8,		btree_id		)
-		TRACE_BPOS_entries(pos)
+		__field(long,	nr_to_scan		)
+		__field(long,	can_free		)
+		__field(long,	ret			)
 	),
 
 	TP_fast_assign(
-		__entry->dev		= c->dev;
-		__entry->level		= b->c.level;
-		__entry->btree_id	= b->c.btree_id;
-		TRACE_BPOS_assign(pos, b->key.k.p);
+		__entry->nr_to_scan	= nr_to_scan;
+		__entry->can_free	= can_free;
+		__entry->ret		= ret;
 	),
 
-	TP_printk("%d,%d %u %s %llu:%llu:%u",
-		  MAJOR(__entry->dev), MINOR(__entry->dev),
-		  __entry->level,
-		  bch2_btree_ids[__entry->btree_id],
-		  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
+	TP_printk("scanned for %li nodes, can free %li, ret %li",
+		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
+);
+
+DEFINE_EVENT(btree_node, btree_cache_reap,
+	TP_PROTO(struct bch_fs *c, struct btree *b),
+	TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
+	TP_PROTO(struct bch_fs *c),
+	TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
+	TP_PROTO(struct bch_fs *c),
+	TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
+	TP_PROTO(struct bch_fs *c),
+	TP_ARGS(c)
 );
 
-DEFINE_EVENT(btree_node, btree_read,
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
+	TP_PROTO(struct bch_fs *c),
+	TP_ARGS(c)
+);
+
+/* Btree */
+
+DEFINE_EVENT(btree_node, btree_node_read,
 	TP_PROTO(struct bch_fs *c, struct btree *b),
 	TP_ARGS(c, b)
 );
 
-TRACE_EVENT(btree_write,
+TRACE_EVENT(btree_node_write,
 	TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
 	TP_ARGS(b, bytes, sectors),
 
@@ -291,31 +341,6 @@ DEFINE_EVENT(btree_node, btree_node_free,
 	TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_node_reap,
-	TP_PROTO(struct bch_fs *c, struct btree *b),
-	TP_ARGS(c, b)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_lock_fail,
-	TP_PROTO(struct bch_fs *c),
-	TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_lock,
-	TP_PROTO(struct bch_fs *c),
-	TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize,
-	TP_PROTO(struct bch_fs *c),
-	TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
-	TP_PROTO(struct bch_fs *c),
-	TP_ARGS(c)
-);
-
 TRACE_EVENT(btree_reserve_get_fail,
 	TP_PROTO(const char *trans_fn,
 		 unsigned long caller_ip,
@@ -340,52 +365,32 @@ TRACE_EVENT(btree_reserve_get_fail,
 		  __entry->required)
 );
 
-DEFINE_EVENT(btree_node, btree_split,
+DEFINE_EVENT(btree_node, btree_node_compact,
 	TP_PROTO(struct bch_fs *c, struct btree *b),
 	TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_compact,
+DEFINE_EVENT(btree_node, btree_node_merge,
 	TP_PROTO(struct bch_fs *c, struct btree *b),
 	TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_merge,
+DEFINE_EVENT(btree_node, btree_node_split,
 	TP_PROTO(struct bch_fs *c, struct btree *b),
 	TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_rewrite,
+DEFINE_EVENT(btree_node, btree_node_rewrite,
 	TP_PROTO(struct bch_fs *c, struct btree *b),
 	TP_ARGS(c, b)
 );
 
-DEFINE_EVENT(btree_node, btree_set_root,
+DEFINE_EVENT(btree_node, btree_node_set_root,
 	TP_PROTO(struct bch_fs *c, struct btree *b),
 	TP_ARGS(c, b)
 );
 
-TRACE_EVENT(btree_cache_scan,
-	TP_PROTO(long nr_to_scan, long can_free, long ret),
-	TP_ARGS(nr_to_scan, can_free, ret),
-
-	TP_STRUCT__entry(
-		__field(long,	nr_to_scan		)
-		__field(long,	can_free		)
-		__field(long,	ret			)
-	),
-
-	TP_fast_assign(
-		__entry->nr_to_scan	= nr_to_scan;
-		__entry->can_free	= can_free;
-		__entry->ret		= ret;
-	),
-
-	TP_printk("scanned for %li nodes, can free %li, ret %li",
-		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
-);
-
-TRACE_EVENT(btree_node_relock_fail,
+TRACE_EVENT(btree_path_relock_fail,
 	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
 		 struct btree_path *path,
@@ -429,7 +434,7 @@ TRACE_EVENT(btree_node_relock_fail,
 		  __entry->node_lock_seq)
 );
 
-TRACE_EVENT(btree_node_upgrade_fail,
+TRACE_EVENT(btree_path_upgrade_fail,
 	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
 		 struct btree_path *path,
@@ -617,7 +622,7 @@ TRACE_EVENT(discard_buckets,
 		  __entry->err)
 );
 
-TRACE_EVENT(invalidate_bucket,
+TRACE_EVENT(bucket_invalidate,
 	TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
 	TP_ARGS(c, dev, bucket, sectors),
 
@@ -643,17 +648,27 @@ TRACE_EVENT(invalidate_bucket,
 
 /* Moving IO */
 
-DEFINE_EVENT(bkey, move_extent,
+DEFINE_EVENT(bkey, move_extent_read,
+	TP_PROTO(const struct bkey *k),
+	TP_ARGS(k)
+);
+
+DEFINE_EVENT(bkey, move_extent_write,
 	TP_PROTO(const struct bkey *k),
 	TP_ARGS(k)
 );
 
-DEFINE_EVENT(bkey, move_alloc_mem_fail,
+DEFINE_EVENT(bkey, move_extent_finish,
 	TP_PROTO(const struct bkey *k),
 	TP_ARGS(k)
 );
 
-DEFINE_EVENT(bkey, move_race,
+DEFINE_EVENT(bkey, move_extent_fail,
+	TP_PROTO(const struct bkey *k),
+	TP_ARGS(k)
+);
+
+DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
 	TP_PROTO(const struct bkey *k),
 	TP_ARGS(k)
 );
@@ -732,11 +747,6 @@ TRACE_EVENT(copygc_wait,
 		  __entry->wait_amount, __entry->until)
 );
 
-DEFINE_EVENT(bpos, data_update_fail,
-	TP_PROTO(const struct bpos *p),
-	TP_ARGS(p)
-);
-
 /* btree transactions: */
 
 DECLARE_EVENT_CLASS(transaction_event,
@@ -763,7 +773,7 @@ DEFINE_EVENT(transaction_event, transaction_commit,
 	TP_ARGS(trans, caller_ip)
 );
 
-DEFINE_EVENT(transaction_event, transaction_restart_injected,
+DEFINE_EVENT(transaction_event, trans_restart_injected,
 	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
 	TP_ARGS(trans, caller_ip)
@@ -926,7 +936,7 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
 	TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(transaction_event, transaction_restart_key_cache_upgrade,
+DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
 	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
 	TP_ARGS(trans, caller_ip)
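A note on the counters themselves: they are declared as u64 __percpu *counters in bch_fs, so each trace_and_count() increment is a cheap CPU-local operation and a total only exists once someone sums across CPUs. A minimal sketch of that read-side aggregation, under the assumption of a plain 2-D array standing in for the kernel's per-cpu allocation:

```c
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS		4	/* assumption for the sketch */
#define NR_COUNTERS	74	/* entries 0..73 in BCH_PERSISTENT_COUNTERS() */

/* 2-D array standing in for one per-cpu u64 slot per counter. */
static uint64_t percpu_counters[NR_CPUS][NR_COUNTERS];

/* Writers touch only their own CPU's slot; readers sum all slots. */
static uint64_t counter_read(unsigned int idx)
{
	uint64_t sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += percpu_counters[cpu][idx];
	return sum;
}

int main(void)
{
	percpu_counters[0][5]++;	/* e.g. bucket_alloc (index 5) on CPU 0 */
	percpu_counters[2][5] += 2;	/* and twice on CPU 2 */

	printf("bucket_alloc total = %llu\n",
	       (unsigned long long)counter_read(5));
	return 0;
}
```

Because each x() entry in BCH_PERSISTENT_COUNTERS() carries an explicit index (note read_split at 33 listed before read_retry at 32), the on-disk counter slots stay stable even if the list is later reordered or extended.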