summaryrefslogtreecommitdiff
path: root/fs/bcachefs/clock.c
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2021-01-21 23:28:59 +0300
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-23 00:08:52 +0300
commit2abe542087d9cb1bc7bb8ac7ae262afccbdb7aa6 (patch)
treec33f5733a5cf3f091e59e5084be29ef5819a9f71 /fs/bcachefs/clock.c
parent7f4e1d5d0faff0d72e9f6708bf98488d76533846 (diff)
downloadlinux-2abe542087d9cb1bc7bb8ac7ae262afccbdb7aa6.tar.xz
bcachefs: Persist 64 bit io clocks
Originally, bcachefs - going back to bcache - stored, for each bucket, a 16 bit counter corresponding to how long it had been since the bucket was read from. But, this required periodically rescaling counters on every bucket to avoid wraparound. That wasn't an issue in bcache, where we'd periodically rewrite the per bucket metadata all at once, but in bcachefs we're trying to avoid having to walk every single bucket. This patch switches to persisting 64 bit io clocks, corresponding to the 64 bit bucket timestamps introduced in the previous patch with KEY_TYPE_alloc_v2. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/clock.c')
-rw-r--r--fs/bcachefs/clock.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index 869ba1887757..da91c95e3ffc 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -19,7 +19,7 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
spin_lock(&clock->timer_lock);
- if (time_after_eq((unsigned long) atomic_long_read(&clock->now),
+ if (time_after_eq((unsigned long) atomic64_read(&clock->now),
timer->expire)) {
spin_unlock(&clock->timer_lock);
timer->fn(timer);
@@ -146,7 +146,7 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
{
struct io_timer *timer;
- unsigned long now = atomic_long_add_return(sectors, &clock->now);
+ unsigned long now = atomic64_add_return(sectors, &clock->now);
while ((timer = get_expired_timer(clock, now)))
timer->fn(timer);
@@ -158,7 +158,7 @@ void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
unsigned i;
spin_lock(&clock->timer_lock);
- now = atomic_long_read(&clock->now);
+ now = atomic64_read(&clock->now);
for (i = 0; i < clock->timers.used; i++)
pr_buf(out, "%ps:\t%li\n",
@@ -175,7 +175,7 @@ void bch2_io_clock_exit(struct io_clock *clock)
int bch2_io_clock_init(struct io_clock *clock)
{
- atomic_long_set(&clock->now, 0);
+ atomic64_set(&clock->now, 0);
spin_lock_init(&clock->timer_lock);
clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();