Diffstat (limited to 'io_uring/cancel.c')
-rw-r--r--	io_uring/cancel.c	107
1 file changed, 107 insertions, 0 deletions
diff --git a/io_uring/cancel.c b/io_uring/cancel.c
index da486de07029..8435a1eba59a 100644
--- a/io_uring/cancel.c
+++ b/io_uring/cancel.c
@@ -6,6 +6,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/namei.h>
+#include <linux/nospec.h>
 #include <linux/io_uring.h>
 
 #include <uapi/linux/io_uring.h>
@@ -206,3 +207,109 @@ void init_hash_table(struct io_hash_table *table, unsigned size)
 		INIT_HLIST_HEAD(&table->hbs[i].list);
 	}
 }
+
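+/*
+ * Attempt one cancelation pass. For fixed files the target must be
+ * re-resolved on every pass, since the file table may have changed
+ * while the uring_lock was dropped.
+ */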
+static int __io_sync_cancel(struct io_uring_task *tctx,
+			    struct io_cancel_data *cd, int fd)
+{
+	struct io_ring_ctx *ctx = cd->ctx;
+
+	/* fixed must be grabbed every time since we drop the uring_lock */
+	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
+	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
+		unsigned long file_ptr;
+
+		if (unlikely(fd >= ctx->nr_user_files))
+			return -EBADF;
+		fd = array_index_nospec(fd, ctx->nr_user_files);
+		file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
+		cd->file = (struct file *) (file_ptr & FFS_MASK);
+		if (!cd->file)
+			return -EBADF;
+	}
+
+	return __io_async_cancel(cd, tctx, 0);
+}
+
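+/*
+ * Synchronously cancel matching requests: try once, then keep retrying
+ * off cq_wait wakeups until no matches remain (-ENOENT), a signal
+ * arrives, or the caller-supplied timeout expires.
+ */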
+int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
+	__must_hold(&ctx->uring_lock)
+{
+	struct io_cancel_data cd = {
+		.ctx	= ctx,
+		.seq	= atomic_inc_return(&ctx->cancel_seq),
+	};
+	ktime_t timeout = KTIME_MAX;
+	struct io_uring_sync_cancel_reg sc;
+	struct fd f = { };
+	DEFINE_WAIT(wait);
+	int ret;
+
+	if (copy_from_user(&sc, arg, sizeof(sc)))
+		return -EFAULT;
+	if (sc.flags & ~CANCEL_FLAGS)
+		return -EINVAL;
+	if (sc.pad[0] || sc.pad[1] || sc.pad[2] || sc.pad[3])
+		return -EINVAL;
+
+	cd.data = sc.addr;
+	cd.flags = sc.flags;
+
+	/* we can grab a normal file descriptor upfront */
+	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
+	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
+		f = fdget(sc.fd);
+		if (!f.file)
+			return -EBADF;
+		cd.file = f.file;
+	}
+
+	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+
+	/* found something, done! */
+	if (ret != -EALREADY)
+		goto out;
+
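+	/* a timeout of -1 sec and -1 nsec means "wait indefinitely" */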
+	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
+		struct timespec64 ts = {
+			.tv_sec		= sc.timeout.tv_sec,
+			.tv_nsec	= sc.timeout.tv_nsec
+		};
+
+		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
+	}
+
+	/*
+	 * Keep looking until we get -ENOENT. We'll get woken every time
+	 * a request completes and will retry the cancelation.
+	 */
+	do {
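+		/* take a fresh sequence so requests seen in an earlier pass are matched again */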
+		cd.seq = atomic_inc_return(&ctx->cancel_seq);
+
+		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);
+
+		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
+
+		if (ret != -EALREADY)
+			break;
+
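+		/* drop the uring_lock while running task_work and sleeping */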
+		mutex_unlock(&ctx->uring_lock);
+		ret = io_run_task_work_sig();
+		if (ret < 0) {
+			mutex_lock(&ctx->uring_lock);
+			break;
+		}
+		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
+		mutex_lock(&ctx->uring_lock);
+		if (!ret) {
+			ret = -ETIME;
+			break;
+		}
+	} while (1);
+
+	finish_wait(&ctx->cq_wait, &wait);
+
+	if (ret == -ENOENT || ret > 0)
+		ret = 0;
+out:
+	fdput(f);
+	return ret;
+}
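
For context, a minimal userspace sketch of driving this API. It is an assumption-laden illustration, not part of this commit: the struct io_uring_sync_cancel_reg layout and the IORING_REGISTER_SYNC_CANCEL opcode come from the matching uapi change in this series (not shown in this file's diff), and io_uring_register_sync_cancel() is liburing's wrapper for that register opcode. The field usage mirrors what io_sync_cancel() reads via copy_from_user() above.

/*
 * Sketch: cancel all pending requests targeting 'fd' and wait for the
 * cancelations to complete. Assumes liburing's
 * io_uring_register_sync_cancel() helper and the uapi additions from
 * this patch series.
 */
#include <liburing.h>
#include <string.h>

static int sync_cancel_fd(struct io_uring *ring, int fd)
{
	struct io_uring_sync_cancel_reg reg;

	memset(&reg, 0, sizeof(reg));	/* pad[] must be zero or we get -EINVAL */
	reg.fd = fd;
	reg.flags = IORING_ASYNC_CANCEL_FD;
	/* -1/-1 leaves the kernel timeout at KTIME_MAX: wait indefinitely */
	reg.timeout.tv_sec = -1;
	reg.timeout.tv_nsec = -1;

	/* 0 on success; -ENOENT if nothing matched, -ETIME on timeout */
	return io_uring_register_sync_cancel(ring, &reg);
}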