author    Pavel Begunkov <asml.silence@gmail.com>    2022-03-17 05:03:42 +0300
committer Jens Axboe <axboe@kernel.dk>    2022-03-17 05:26:32 +0300
commit    9aa8dfde4869ccdec0a7290b686dbc10e079e163 (patch)
tree      4c3b1da830f66232ee6158524794be2090be531a /fs/io_uring.c
parent    9333f6b4628c8037a89ed23e1188d4b7dc5d74e4 (diff)
io_uring: fold evfd signalling under a slower path
Add a ->has_evfd flag, which is true IFF there is an eventfd attached, and use it to hide io_eventfd_signal() inside __io_commit_cqring_flush() and combine the fast-path checks into a single if. Also, gcc 11.2 wasn't inlining io_cqring_ev_posted() without this change, so this helps there as well.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/f6168471997decded475a063f92915787975a30b.1647481208.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
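The pattern generalizes: cache a cheap boolean next to the other rarely-true flags so the completion hot path pays one combined, usually-not-taken branch, and fold all of the rare work behind a single out-of-line slow-path helper. Below is a minimal userspace C sketch of that pattern, not the kernel code itself; the names (commit_flush_slow, ev_posted) and the printf stand-in for io_eventfd_signal() are illustrative assumptions.

    #include <stdio.h>

    struct ctx {
        /* rarely-true conditions, checked together on the hot path */
        unsigned int off_timeout_used : 1;
        unsigned int drain_active     : 1;
        unsigned int has_evfd         : 1; /* mirrors "eventfd attached" */
    };

    /* out-of-line slow path: only reached when some rare flag is set */
    static void commit_flush_slow(struct ctx *ctx)
    {
        if (ctx->off_timeout_used || ctx->drain_active) {
            /* take the lock, flush timeouts, queue deferred work ... */
        }
        if (ctx->has_evfd)
            printf("signal eventfd\n"); /* stand-in for io_eventfd_signal() */
    }

    /* hot path: a single predictable branch when nothing rare is pending */
    static inline void ev_posted(struct ctx *ctx)
    {
        if (ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd)
            commit_flush_slow(ctx);
        /* wake CQ waiters here (io_cqring_wake() in the real code) */
    }

    int main(void)
    {
        struct ctx ctx = { .has_evfd = 1 };
        ev_posted(&ctx); /* slow path: signals the eventfd */
        ctx.has_evfd = 0;
        ev_posted(&ctx); /* fast path: one branch, no call */
        return 0;
    }

Keeping the flag in the same cacheline-aligned flag group as off_timeout_used and drain_active means the combined check costs no extra loads, which is what lets gcc inline the caller again.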
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--  fs/io_uring.c  60
1 file changed, 34 insertions(+), 26 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e69531ccd209..68498cebc0ef 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -347,6 +347,7 @@ struct io_ring_ctx {
unsigned int off_timeout_used: 1;
unsigned int drain_active: 1;
unsigned int drain_disabled: 1;
+ unsigned int has_evfd: 1;
} ____cacheline_aligned_in_smp;
/* submission data */
@@ -1181,6 +1182,7 @@ static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
+static void io_eventfd_signal(struct io_ring_ctx *ctx);
static struct kmem_cache *req_cachep;
@@ -1785,15 +1787,19 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}
-static __cold void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
+static void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
- spin_lock(&ctx->completion_lock);
- if (ctx->off_timeout_used)
- io_flush_timeouts(ctx);
- if (ctx->drain_active)
- io_queue_deferred(ctx);
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
+ if (ctx->off_timeout_used || ctx->drain_active) {
+ spin_lock(&ctx->completion_lock);
+ if (ctx->off_timeout_used)
+ io_flush_timeouts(ctx);
+ if (ctx->drain_active)
+ io_queue_deferred(ctx);
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ }
+ if (ctx->has_evfd)
+ io_eventfd_signal(ctx);
}
static inline bool io_sqring_full(struct io_ring_ctx *ctx)
@@ -1852,6 +1858,17 @@ out:
rcu_read_unlock();
}
+static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+{
+ /*
+ * wake_up_all() may seem excessive, but io_wake_function() and
+ * io_should_wake() handle the termination of the loop and only
+ * wake as many waiters as we need to.
+ */
+ if (wq_has_sleeper(&ctx->cq_wait))
+ wake_up_all(&ctx->cq_wait);
+}
+
/*
* This should only get called when at least one event has been posted.
* Some applications rely on the eventfd notification count only changing
@@ -1861,31 +1878,21 @@ out:
*/
static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
- if (unlikely(ctx->off_timeout_used || ctx->drain_active))
+ if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
+ ctx->has_evfd))
__io_commit_cqring_flush(ctx);
- /*
- * wake_up_all() may seem excessive, but io_wake_function() and
- * io_should_wake() handle the termination of the loop and only
- * wake as many waiters as we need to.
- */
- if (wq_has_sleeper(&ctx->cq_wait))
- wake_up_all(&ctx->cq_wait);
- if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
- io_eventfd_signal(ctx);
+ io_cqring_wake(ctx);
}
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
{
- if (unlikely(ctx->off_timeout_used || ctx->drain_active))
+ if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
+ ctx->has_evfd))
__io_commit_cqring_flush(ctx);
- if (ctx->flags & IORING_SETUP_SQPOLL) {
- if (wq_has_sleeper(&ctx->cq_wait))
- wake_up_all(&ctx->cq_wait);
- }
- if (unlikely(rcu_dereference_raw(ctx->io_ev_fd)))
- io_eventfd_signal(ctx);
+ if (ctx->flags & IORING_SETUP_SQPOLL)
+ io_cqring_wake(ctx);
}
/* Returns true if there are no backlogged entries after the flush */
@@ -9898,7 +9905,7 @@ static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
return ret;
}
ev_fd->eventfd_async = eventfd_async;
-
+ ctx->has_evfd = true;
rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
return 0;
}
@@ -9918,6 +9925,7 @@ static int io_eventfd_unregister(struct io_ring_ctx *ctx)
ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
lockdep_is_held(&ctx->uring_lock));
if (ev_fd) {
+ ctx->has_evfd = false;
rcu_assign_pointer(ctx->io_ev_fd, NULL);
call_rcu(&ev_fd->rcu, io_eventfd_put);
return 0;