summaryrefslogtreecommitdiff
path: root/fs/bcachefs/clock.c
diff options
context:
space:
mode:
author    Kent Overstreet <kent.overstreet@gmail.com> 2019-12-19 23:07:51 +0300
committer Kent Overstreet <kent.overstreet@linux.dev> 2023-10-23 00:08:33 +0300
commit   5873efbfd9c3f53312aaa6c3024592a2a344f615 (patch)
tree     3e7b0e642b152359825b76b9861109c4dcd73363 /fs/bcachefs/clock.c
parent   187c71f6ab439582c80433ef9e04f615b8c0f576 (diff)
download linux-5873efbfd9c3f53312aaa6c3024592a2a344f615.tar.xz
bcachefs: Make io timers less buggy
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/clock.c')
-rw-r--r--  fs/bcachefs/clock.c  43
1 file changed, 29 insertions(+), 14 deletions(-)
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index e227753563ab..51286520c5c7 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -18,6 +18,14 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
size_t i;
spin_lock(&clock->timer_lock);
+
+ if (time_after_eq((unsigned long) atomic_long_read(&clock->now),
+ timer->expire)) {
+ spin_unlock(&clock->timer_lock);
+ timer->fn(timer);
+ return;
+ }
+
for (i = 0; i < clock->timers.used; i++)
if (clock->timers.data[i] == timer)
goto out;
@@ -135,26 +143,31 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
return ret;
}
-void __bch2_increment_clock(struct io_clock *clock)
+void __bch2_increment_clock(struct io_clock *clock, unsigned sectors)
{
struct io_timer *timer;
- unsigned long now;
- unsigned sectors;
+ unsigned long now = atomic_long_add_return(sectors, &clock->now);
- /* Buffer up one megabyte worth of IO in the percpu counter */
- preempt_disable();
+ while ((timer = get_expired_timer(clock, now)))
+ timer->fn(timer);
+}
- if (this_cpu_read(*clock->pcpu_buf) < IO_CLOCK_PCPU_SECTORS) {
- preempt_enable();
- return;
- }
+ssize_t bch2_io_timers_show(struct io_clock *clock, char *buf)
+{
+ struct printbuf out = _PBUF(buf, PAGE_SIZE);
+ unsigned long now;
+ unsigned i;
- sectors = this_cpu_xchg(*clock->pcpu_buf, 0);
- preempt_enable();
- now = atomic_long_add_return(sectors, &clock->now);
+ spin_lock(&clock->timer_lock);
+ now = atomic_long_read(&clock->now);
- while ((timer = get_expired_timer(clock, now)))
- timer->fn(timer);
+ for (i = 0; i < clock->timers.used; i++)
+ pr_buf(&out, "%pf:\t%li\n",
+ clock->timers.data[i]->fn,
+ clock->timers.data[i]->expire - now);
+ spin_unlock(&clock->timer_lock);
+
+ return out.pos - buf;
}
void bch2_io_clock_exit(struct io_clock *clock)
@@ -168,6 +181,8 @@ int bch2_io_clock_init(struct io_clock *clock)
atomic_long_set(&clock->now, 0);
spin_lock_init(&clock->timer_lock);
+ clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();
+
clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
if (!clock->pcpu_buf)
return -ENOMEM;