Diffstat (limited to 'meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2021-39698.patch')
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2021-39698.patch  107
1 file changed, 107 insertions, 0 deletions
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2021-39698.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2021-39698.patch
new file mode 100644
index 000000000..9a7250566
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2021-39698.patch
@@ -0,0 +1,107 @@
+From 1ebb6cd8c754bfe1a5f9539027980756bce7cb08 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 8 Dec 2021 17:04:51 -0800
+Subject: [PATCH] wait: add wake_up_pollfree()
+
+commit 42288cb44c4b5fff7653bc392b583a2b8bd6a8c0 upstream.
+
+Several ->poll() implementations are special in that they use a
+waitqueue whose lifetime is the current task, rather than the struct
+file as is normally the case. This is okay for blocking polls, since a
+blocking poll occurs within one task; however, non-blocking polls
+require another solution. This solution is for the queue to be cleared
+before it is freed, using 'wake_up_poll(wq, EPOLLHUP | POLLFREE);'.
+
+However, that has a bug: wake_up_poll() calls __wake_up() with
+nr_exclusive=1. Therefore, if there are multiple "exclusive" waiters,
+and the wakeup function for the first one returns a positive value, only
+that one will be called. That's *not* what's needed for POLLFREE;
+POLLFREE is special in that it really needs to wake up everyone.
+
+Considering the three non-blocking poll systems:
+
+- io_uring poll doesn't handle POLLFREE at all, so it is broken anyway.
+
+- aio poll is unaffected, since it doesn't support exclusive waits.
+ However, that's fragile, as someone could add this feature later.
+
+- epoll doesn't appear to be broken by this, since its wakeup function
+ returns 0 when it sees POLLFREE. But this is fragile.
+
+Although there is a workaround (see epoll), it's better to define a
+function which always sends POLLFREE to all waiters. Add such a
+function. Also make it verify that the queue really becomes empty after
+all waiters have been woken up.
+
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20211209010455.42744-2-ebiggers@kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/wait.h | 26 ++++++++++++++++++++++++++
+ kernel/sched/wait.c | 7 +++++++
+ 2 files changed, 33 insertions(+)
+
+diff --git a/include/linux/wait.h b/include/linux/wait.h
+index 93dab0e9580f8d..d22cf2985b8fd6 100644
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
+ void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+ void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
++void __wake_up_pollfree(struct wait_queue_head *wq_head);
+
+ #define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
+ #define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
+@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+ #define wake_up_interruptible_sync_poll_locked(x, m) \
+ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+
++/**
++ * wake_up_pollfree - signal that a polled waitqueue is going away
++ * @wq_head: the wait queue head
++ *
++ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
++ * lifetime is tied to a task rather than to the 'struct file' being polled,
++ * this function must be called before the waitqueue is freed so that
++ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
++ *
++ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
++ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
++ */
++static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
++{
++	/*
++	 * For performance reasons, we don't always take the queue lock here.
++	 * Therefore, we might race with someone removing the last entry from
++	 * the queue, and proceed while they still hold the queue lock.
++	 * However, rcu_read_lock() is required to be held in such cases, so we
++	 * can safely proceed with an RCU-delayed free.
++	 */
++	if (waitqueue_active(wq_head))
++		__wake_up_pollfree(wq_head);
++}
++
+ #define ___wait_cond_timeout(condition) \
+ ({ \
+ bool __cond = (condition); \
+diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
+index 76577d1642a5dc..eca38107b32f16 100644
+--- a/kernel/sched/wait.c
++++ b/kernel/sched/wait.c
+@@ -238,6 +238,13 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode)
+ }
+ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
+
++void __wake_up_pollfree(struct wait_queue_head *wq_head)
++{
++	__wake_up(wq_head, TASK_NORMAL, 0, poll_to_key(EPOLLHUP | POLLFREE));
++	/* POLLFREE must have cleared the queue. */
++	WARN_ON_ONCE(waitqueue_active(wq_head));
++}
++
+ /*
+ * Note: we use "set_current_state()" _after_ the wait-queue add,
+ * because we need a memory barrier there on SMP, so that any
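For context, the sketch below shows the usage pattern the commit message and the wake_up_pollfree() kernel-doc describe: a driver whose ->poll() waitqueue lives in a per-task context calls the new helper on teardown and then RCU-delays the free. It is an illustration only, not part of this patch; demo_ctx, demo_poll() and demo_task_exit() are hypothetical names invented for the sketch.

/*
 * Illustration only -- not part of CVE-2021-39698.patch.  A hypothetical
 * driver whose ->poll() waitqueue is tied to a per-task context rather
 * than to the struct file, showing how wake_up_pollfree() is meant to be
 * used when that context is torn down.
 */
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct demo_ctx {
	wait_queue_head_t wait;	/* initialised with init_waitqueue_head(); lifetime tied to the task */
	bool ready;
	struct rcu_head rcu;
};

static __poll_t demo_poll(struct file *file, poll_table *pt)
{
	struct demo_ctx *ctx = file->private_data;

	/* epoll may keep an entry on ctx->wait after this call returns. */
	poll_wait(file, &ctx->wait, pt);
	return ctx->ready ? (EPOLLIN | EPOLLRDNORM) : 0;
}

static void demo_task_exit(struct demo_ctx *ctx)
{
	/*
	 * The waitqueue disappears with the task, so every waiter must see
	 * POLLFREE; wake_up_poll() would stop after the first exclusive
	 * waiter, which is exactly the bug the commit message describes.
	 */
	wake_up_pollfree(&ctx->wait);

	/* RCU-delay the free, as the wake_up_pollfree() kernel-doc requires. */
	kfree_rcu(ctx, rcu);
}

Upstream, the follow-up patches in the same series convert the affected ->poll() implementations (binder and signalfd, plus a related aio fix) to this pattern; the hunks above only backport the helper itself.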