author		Pavel Begunkov <asml.silence@gmail.com>	2023-01-05 14:22:29 +0300
committer	Jens Axboe <axboe@kernel.dk>	2023-01-30 01:17:40 +0300
commit		d33a39e577687e12d4468e9dd999375b9973d700 (patch)
tree		1fb2d9c0352da1e3951bae9b94aa9539aed35f69 /io_uring
parent		46ae7eef44f6dfd825a3bcfa43392d3ad9836ada (diff)
download	linux-d33a39e577687e12d4468e9dd999375b9973d700.tar.xz
io_uring: keep timeout in io_wait_queue
Move waiting timeout into io_wait_queue

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e4b48a9e26a3b1cf97c80121e62d4b5ab873d28d.1672916894.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
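The change itself is mechanical, but the pattern is worth spelling out: the absolute deadline now lives inside the per-waiter state (iowq.timeout) instead of being threaded through io_cqring_wait_schedule() as a separate parameter, and schedule_hrtimeout() keeps using HRTIMER_MODE_ABS, so repeated trips around the wait loop never restart the clock. Below is a userspace analogue of that pattern, a minimal sketch only: the struct, helper names, and pthread-based waiting are hypothetical stand-ins, not kernel code from this patch.

/* Sketch of the "absolute deadline stored in the wait state" pattern
 * in userspace terms. All names here are hypothetical. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct wait_state {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool done;
	struct timespec deadline;	/* absolute, like iowq.timeout */
};

static void wait_state_init(struct wait_state *w, long rel_ns)
{
	pthread_condattr_t attr;

	pthread_mutex_init(&w->lock, NULL);
	pthread_condattr_init(&attr);
	/* Wait against the same clock the deadline is computed on;
	 * the pthread default is CLOCK_REALTIME. */
	pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
	pthread_cond_init(&w->cond, &attr);
	pthread_condattr_destroy(&attr);
	w->done = false;

	/* Relative timeout -> absolute deadline, computed exactly once,
	 * mirroring ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()). */
	clock_gettime(CLOCK_MONOTONIC, &w->deadline);
	w->deadline.tv_nsec += rel_ns;
	w->deadline.tv_sec += w->deadline.tv_nsec / 1000000000L;
	w->deadline.tv_nsec %= 1000000000L;
}

static int wait_until_done(struct wait_state *w)
{
	int ret = 0;

	pthread_mutex_lock(&w->lock);
	while (!w->done && ret == 0)
		/* Absolute-time wait: a spurious wakeup re-waits against
		 * the unchanged deadline, like HRTIMER_MODE_ABS above. */
		ret = pthread_cond_timedwait(&w->cond, &w->lock, &w->deadline);
	pthread_mutex_unlock(&w->lock);
	return ret;	/* 0 when done, ETIMEDOUT on expiry */
}

The design point carries over directly: because the deadline is absolute and owned by the wait state, every retry in the loop charges against the same budget instead of being handed a fresh timeout.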
Diffstat (limited to 'io_uring')
-rw-r--r--	io_uring/io_uring.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6229a49c0c33..fdea6fbc3fad 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2414,6 +2414,7 @@ struct io_wait_queue {
 	struct io_ring_ctx *ctx;
 	unsigned cq_tail;
 	unsigned nr_timeouts;
+	ktime_t timeout;
 };
 
 static inline bool io_has_work(struct io_ring_ctx *ctx)
@@ -2466,8 +2467,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
-					  struct io_wait_queue *iowq,
-					  ktime_t *timeout)
+					  struct io_wait_queue *iowq)
 {
 	if (unlikely(READ_ONCE(ctx->check_cq)))
 		return 1;
@@ -2479,9 +2479,9 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 		return -EINTR;
 	if (unlikely(io_should_wake(iowq)))
 		return 0;
-	if (*timeout == KTIME_MAX)
+	if (iowq->timeout == KTIME_MAX)
 		schedule();
-	else if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
+	else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
 		return -ETIME;
 	return 0;
 }
@@ -2496,7 +2496,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 {
 	struct io_wait_queue iowq;
 	struct io_rings *rings = ctx->rings;
-	ktime_t timeout = KTIME_MAX;
 	int ret;
 
 	if (!io_allowed_run_tw(ctx))
@@ -2522,20 +2521,21 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 			return ret;
 	}
 
-	if (uts) {
-		struct timespec64 ts;
-
-		if (get_timespec64(&ts, uts))
-			return -EFAULT;
-		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
-	}
-
 	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
 	iowq.wq.private = current;
 	INIT_LIST_HEAD(&iowq.wq.entry);
 	iowq.ctx = ctx;
 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
+	iowq.timeout = KTIME_MAX;
+
+	if (uts) {
+		struct timespec64 ts;
+
+		if (get_timespec64(&ts, uts))
+			return -EFAULT;
+		iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+	}
 
 	trace_io_uring_cqring_wait(ctx, min_events);
 	do {
@@ -2543,7 +2543,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
 						TASK_INTERRUPTIBLE);
-		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+		ret = io_cqring_wait_schedule(ctx, &iowq);
 		if (ret < 0)
			break;
 		__set_current_state(TASK_RUNNING);
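For context, io_cqring_wait() is the kernel side of a timed CQ wait as driven from userspace. A minimal liburing caller is sketched below, under the assumption of a kernel/liburing pair with IORING_ENTER_EXT_ARG support (5.11+), so the relative timespec reaches io_cqring_wait() as uts and becomes the absolute iowq.timeout above:

/* Minimal liburing caller: 'ts' below is what io_cqring_wait()
 * receives as 'uts'. With no completions pending, the wait should
 * end in -ETIME once the hrtimer armed from iowq.timeout fires. */
#include <errno.h>
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	ret = io_uring_wait_cqe_timeout(&ring, &cqe, &ts);
	if (ret == -ETIME)
		printf("wait timed out after ~1s, as expected\n");

	io_uring_queue_exit(&ring);
	return 0;
}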