Diffstat (limited to 'meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-29582.patch')
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-29582.patch  53
1 file changed, 53 insertions, 0 deletions
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-29582.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-29582.patch
new file mode 100644
index 000000000..e0fac6ec5
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-29582.patch
@@ -0,0 +1,53 @@
+From e677edbcabee849bfdd43f1602bccbecf736a646 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Fri, 8 Apr 2022 11:08:58 -0600
+Subject: [PATCH] io_uring: fix race between timeout flush and removal
+
+io_flush_timeouts() assumes the timeout isn't in the process of triggering
+or being removed/canceled, so it unconditionally removes it from the
+timeout list and attempts to cancel it.
+
+Leave it on the list and let the normal timeout cancelation take care
+of it.
+
+Cc: stable@vger.kernel.org # 5.5+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+---
+ fs/io_uring.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
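+[Editor's note, not part of the upstream patch: the pre-fix loop re-read the
+head of ctx->timeout_list and unlinked each entry itself, racing with a
+concurrent cancelation path that could unlink the same entry. The fix
+iterates with list_for_each_entry_safe() and leaves removal to the normal
+cancelation path; a hedged userspace sketch of that idiom follows the diff
+below.]
+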
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index fafd1ca4780b6..659f8ecba5b79 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1736,12 +1736,11 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
+ __must_hold(&ctx->completion_lock)
+ {
+ u32 seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
++ struct io_kiocb *req, *tmp;
+
+ spin_lock_irq(&ctx->timeout_lock);
+- while (!list_empty(&ctx->timeout_list)) {
++ list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+ u32 events_needed, events_got;
+- struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
+- struct io_kiocb, timeout.list);
+
+ if (io_is_timeout_noseq(req))
+ break;
+@@ -1758,7 +1757,6 @@ static __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
+ if (events_got < events_needed)
+ break;
+
+- list_del_init(&req->timeout.list);
+ io_kill_timeout(req, 0);
+ }
+ ctx->cq_last_tm_flush = seq;
+@@ -6628,6 +6626,7 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+ return -EINVAL;
+
++ INIT_LIST_HEAD(&req->timeout.list);
+ data->mode = io_translate_timeout_mode(flags);
+ hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
+
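
Editor's note: the sketch below is an illustration, not code from the patch.
It is a minimal userspace model of the safe-iteration idiom the fix adopts,
where struct node, kill_entry() and the hand-rolled loop are hypothetical
stand-ins for struct io_kiocb, io_kill_timeout() and
list_for_each_entry_safe(). The point is that the next pointer is cached
before the current entry can be freed.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for a struct io_kiocb on ctx->timeout_list. */
struct node {
	int seq;
	struct node *next;
};

/* Stand-in for io_kill_timeout(): the entry must not be touched after this. */
static void kill_entry(struct node *n)
{
	printf("canceling timeout seq=%d\n", n->seq);
	free(n);
}

int main(void)
{
	/* Build a short list: seq 1 -> 2 -> 3. */
	struct node *head = NULL;
	for (int i = 3; i >= 1; i--) {
		struct node *n = malloc(sizeof(*n));
		if (!n)
			return 1;
		n->seq = i;
		n->next = head;
		head = n;
	}

	/*
	 * Safe iteration: cache n->next in tmp *before* kill_entry() may
	 * free n, mirroring the extra cursor list_for_each_entry_safe()
	 * keeps. Reading n->next after the free would be a use-after-free,
	 * the class of bug the flush loop has to avoid.
	 */
	struct node *n, *tmp;
	for (n = head; n != NULL; n = tmp) {
		tmp = n->next;
		kill_entry(n);
	}
	return 0;
}

The INIT_LIST_HEAD() hunk is complementary: initializing timeout.list at
prep time keeps a later list_del on the entry well-defined even if the
timeout is canceled before it was ever queued.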