author     Paolo Bonzini <pbonzini@redhat.com>  2023-12-23 02:05:07 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>  2023-12-23 02:05:07 +0300
commit     ef5b28372c565128bdce7a59bc78402a8ce68e1b (patch)
tree       77f549fcf73f8400a34e7a5d51548fafd1533644 /io_uring
parent     5c2b2176ead1911d652b8848169bb44bdde75ca8 (diff)
parent     4ad9843e1ea088bd2529290234c6c4c6374836a7 (diff)
download   linux-ef5b28372c565128bdce7a59bc78402a8ce68e1b.tar.xz
Merge tag 'kvm-riscv-fixes-6.7-1' of https://github.com/kvm-riscv/linux into kvm-master
KVM/riscv fixes for 6.7, take #1

- Fix a race condition in updating external interrupt for trap-n-emulated IMSIC swfile
- Fix print_reg defaults in get-reg-list selftest
Diffstat (limited to 'io_uring')
-rw-r--r--  io_uring/io_uring.c  9
-rw-r--r--  io_uring/kbuf.c      8
-rw-r--r--  io_uring/rsrc.h      7
3 files changed, 7 insertions, 17 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index aba5657d287e..9626a363f121 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -271,6 +271,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
struct io_kiocb *req, *tmp;
struct io_tw_state ts = { .locked = true, };
+ percpu_ref_get(&ctx->refs);
mutex_lock(&ctx->uring_lock);
llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
req->io_task_work.func(req, &ts);
@@ -278,6 +279,7 @@ static __cold void io_fallback_req_func(struct work_struct *work)
return;
io_submit_flush_completions(ctx);
mutex_unlock(&ctx->uring_lock);
+ percpu_ref_put(&ctx->refs);
}
static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
@@ -3146,12 +3148,7 @@ static __cold void io_ring_exit_work(struct work_struct *work)
init_completion(&exit.completion);
init_task_work(&exit.task_work, io_tctx_exit_cb);
exit.ctx = ctx;
- /*
- * Some may use context even when all refs and requests have been put,
- * and they are free to do so while still holding uring_lock or
- * completion_lock, see io_req_task_submit(). Apart from other work,
- * this lock/unlock section also waits them to finish.
- */
+
mutex_lock(&ctx->uring_lock);
while (!list_empty(&ctx->tctx_list)) {
WARN_ON_ONCE(time_after(jiffies, timeout));
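
The io_uring.c hunks above bracket the fallback task-work processing with percpu_ref_get(&ctx->refs) / percpu_ref_put(&ctx->refs), presumably so the ring context stays pinned while deferred requests run off the workqueue, and they drop the now-stale comment in io_ring_exit_work() that claimed the uring_lock section also waited for such late users. Below is a minimal user-space sketch of the same get-before-use/put-after pattern; it uses a plain atomic counter rather than the kernel's percpu_ref, and all names are illustrative only.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	atomic_int refs;
};

static void ctx_get(struct ctx *c) { atomic_fetch_add(&c->refs, 1); }

static void ctx_put(struct ctx *c)
{
	/* Free only when the last reference is dropped. */
	if (atomic_fetch_sub(&c->refs, 1) == 1)
		free(c);
}

/* Deferred work takes its own reference before touching the context,
 * mirroring the percpu_ref_get()/put() added to io_fallback_req_func(). */
static void fallback_work(struct ctx *c)
{
	ctx_get(c);
	printf("running deferred work, refs=%d\n", atomic_load(&c->refs));
	ctx_put(c);
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		return 1;
	atomic_init(&c->refs, 1);
	fallback_work(c);
	ctx_put(c);		/* drop the initial reference */
	return 0;
}
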
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 268788305b61..72b6af1d2ed3 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -636,8 +636,8 @@ static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
ibf = io_lookup_buf_free_entry(ctx, ring_size);
if (!ibf) {
ptr = io_mem_alloc(ring_size);
- if (!ptr)
- return -ENOMEM;
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
/* Allocate and store deferred free entry */
ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT);
@@ -756,6 +756,8 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid);
+ if (!bl || !bl->is_mmap)
+ return NULL;
/*
* Ensure the list is fully setup. Only strictly needed for RCU lookup
* via mmap, and in that case only for the array indexed groups. For
@@ -763,8 +765,6 @@ void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid)
*/
if (!smp_load_acquire(&bl->is_ready))
return NULL;
- if (!bl || !bl->is_mmap)
- return NULL;
return bl->buf_ring;
}
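
In the kbuf.c hunks above, io_alloc_pbuf_ring() switches from a NULL check to IS_ERR()/PTR_ERR(), i.e. io_mem_alloc() is now expected to report failure through an ERR_PTR-encoded pointer, and io_pbuf_get_address() moves the !bl / !bl->is_mmap test ahead of the smp_load_acquire(&bl->is_ready) dereference so a missing list is rejected before bl is touched. The following stand-alone sketch re-implements the ERR_PTR convention in simplified form; it is not <linux/err.h> itself, and mem_alloc() is a made-up stand-in for io_mem_alloc().

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error)      { return (void *)error; }
static long  PTR_ERR(const void *ptr) { return (long)ptr; }
static bool  IS_ERR(const void *ptr)
{
	/* Error pointers live in the top MAX_ERRNO addresses. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* An allocator that encodes its errno in the returned pointer instead of
 * returning NULL on failure, as the updated caller expects. */
static void *mem_alloc(size_t size)
{
	void *p = malloc(size);

	return p ? p : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *ring = mem_alloc(4096);

	if (IS_ERR(ring))		/* a bare NULL check would miss this */
		return (int)-PTR_ERR(ring);
	printf("ring allocated at %p\n", ring);
	free(ring);
	return 0;
}
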
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index 8625181fb87a..08ac0d8e07ef 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -77,17 +77,10 @@ int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);
-#if defined(CONFIG_UNIX)
-static inline bool io_file_need_scm(struct file *filp)
-{
- return !!unix_get_socket(filp);
-}
-#else
static inline bool io_file_need_scm(struct file *filp)
{
return false;
}
-#endif
static inline int io_scm_file_account(struct io_ring_ctx *ctx,
struct file *file)
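
The rsrc.h hunk removes the CONFIG_UNIX variant of io_file_need_scm(), which returned !!unix_get_socket(filp), leaving a single definition that is unconditionally false. A hypothetical stand-alone illustration (not the kernel code) of the resulting pattern: callers keep one code path, and a constant-false inline predicate lets the compiler drop the accounting branch entirely.

#include <stdbool.h>
#include <stdio.h>

/* Always-false predicate, as io_file_need_scm() now is. */
static inline bool need_account(int fd)
{
	(void)fd;
	return false;
}

static int register_file(int fd)
{
	if (need_account(fd))	/* constant-false: compiled out as dead code */
		return -1;
	printf("registered fd %d without extra accounting\n", fd);
	return 0;
}

int main(void)
{
	return register_file(3);
}
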