Diffstat (limited to 'meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed')
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0004-soc-aspeed-lpc-mbox-Don-t-allow-partial-reads.patch   40
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0005-ext4-add-EXT4_INODE_HAS_XATTR_SPACE-macro-in-xattr-h.patch   42
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2020-36516.patch   62
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-2978.patch   62
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-3543.patch   97
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-3623.patch  175
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-42703.patch  169
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-4378-1.patch  107
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-4378-2.patch   40
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-0394.patch   43
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1073.patch   34
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1077.patch   50
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1252.patch   89
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch  228
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-2269.patch   56
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-2513.patch  120
16 files changed, 1414 insertions, 0 deletions
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0004-soc-aspeed-lpc-mbox-Don-t-allow-partial-reads.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0004-soc-aspeed-lpc-mbox-Don-t-allow-partial-reads.patch
new file mode 100644
index 000000000..e041e665a
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0004-soc-aspeed-lpc-mbox-Don-t-allow-partial-reads.patch
@@ -0,0 +1,40 @@
+From d655ada2fc4b5065c2be6943f28aa4e4f694a1a3 Mon Sep 17 00:00:00 2001
+From: Iwona Winiarska <iwona.winiarska@intel.com>
+Date: Mon, 9 Jan 2023 21:09:44 +0100
+Subject: [PATCH] soc: aspeed: lpc-mbox: Don't allow partial reads
+
+The IRQ handler always adds all registers to the fifo, while userspace
+can potentially consume a smaller amount. Unfortunately, when a smaller
+amount is consumed, it permanently shifts the fifo.
+Serialize the "empty" state check to avoid partial reads.
+
+Fixes: 60fde6cf7114 ("soc: aspeed: lpc-mbox: Avoid calling kfifo_to_user in atomic context")
+Reported-by: Arun P. Mohanan <arun.p.m@linux.intel.com>
+Signed-off-by: Iwona Winiarska <iwona.winiarska@intel.com>
+---
+ drivers/soc/aspeed/aspeed-lpc-mbox.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/soc/aspeed/aspeed-lpc-mbox.c b/drivers/soc/aspeed/aspeed-lpc-mbox.c
+index 7941792abacb..564c7318da10 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-mbox.c
++++ b/drivers/soc/aspeed/aspeed-lpc-mbox.c
+@@ -172,7 +172,14 @@ static ssize_t aspeed_mbox_read(struct file *file, char __user *buf,
+ }
+
+ mutex_lock(&mbox->mutex);
+- if (kfifo_is_empty(&mbox->fifo)) {
++ /*
++ * Since fifo on the producer side will drop the oldest values, causing
++ * a shift if the data is not consumed fully, when we're using count ==
++ * num_regs reads, we need to serialize with the producer to make
++ * sure that all regs were inserted into fifo (avoiding a partial
++ * read).
++ */
++ if (kfifo_is_empty_spinlocked(&mbox->fifo, &mbox->lock)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto out_unlock;
+--
+2.17.1
+
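
A minimal userspace sketch of what the fix implies for consumers: read the
full register set in one call so the driver never hands back a partial fifo
snapshot. The device node name and register count below are illustrative
assumptions, not taken from the driver above.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MBOX_NUM_REGS 16            /* assumption: depends on the SoC */

    int main(void)
    {
        unsigned char regs[MBOX_NUM_REGS];
        int fd = open("/dev/aspeed-mbox", O_RDONLY); /* hypothetical node name */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* With the fix, a full-size read is serialized against the IRQ
         * handler, so it completes only once every register is queued. */
        ssize_t n = read(fd, regs, sizeof(regs));
        if (n < 0)
            perror("read");
        else
            printf("read %zd mailbox bytes\n", n);
        close(fd);
        return 0;
    }
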
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0005-ext4-add-EXT4_INODE_HAS_XATTR_SPACE-macro-in-xattr-h.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0005-ext4-add-EXT4_INODE_HAS_XATTR_SPACE-macro-in-xattr-h.patch
new file mode 100644
index 000000000..b4a5465fe
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/0005-ext4-add-EXT4_INODE_HAS_XATTR_SPACE-macro-in-xattr-h.patch
@@ -0,0 +1,42 @@
+From 179b14152dcb6a24c3415200603aebca70ff13af Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Thu, 16 Jun 2022 10:13:55 +0800
+Subject: [PATCH] ext4: add EXT4_INODE_HAS_XATTR_SPACE macro in xattr.h
+
+When adding an xattr to an inode, we must ensure that the inode_size is
+not less than EXT4_GOOD_OLD_INODE_SIZE + extra_isize + pad. Otherwise,
+the end position may be greater than the start position, resulting in UAF.
+
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Link: https://lore.kernel.org/r/20220616021358.2504451-2-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+---
+ fs/ext4/xattr.h | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
+index 77efb9a627ad21..f885f362add4af 100644
+--- a/fs/ext4/xattr.h
++++ b/fs/ext4/xattr.h
+@@ -95,6 +95,19 @@ struct ext4_xattr_entry {
+
+ #define EXT4_ZERO_XATTR_VALUE ((void *)-1)
+
++/*
++ * If we want to add an xattr to the inode, we should make sure that
++ * i_extra_isize is not 0 and that the inode size is not less than
++ * EXT4_GOOD_OLD_INODE_SIZE + extra_isize + pad.
++ * EXT4_GOOD_OLD_INODE_SIZE extra_isize header entry pad data
++ * |--------------------------|------------|------|---------|---|-------|
++ */
++#define EXT4_INODE_HAS_XATTR_SPACE(inode) \
++ ((EXT4_I(inode)->i_extra_isize != 0) && \
++ (EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize + \
++ sizeof(struct ext4_xattr_ibody_header) + EXT4_XATTR_PAD <= \
++ EXT4_INODE_SIZE((inode)->i_sb)))
++
+ struct ext4_xattr_info {
+ const char *name;
+ const void *value;
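
A standalone model of the macro's arithmetic (a sketch: EXT4_GOOD_OLD_INODE_SIZE
is 128 and EXT4_XATTR_PAD is 4 upstream, and the ibody header is a single
__le32 magic, but treat the constants here as illustrative assumptions):

    #include <stdbool.h>
    #include <stdio.h>

    #define GOOD_OLD_INODE_SIZE 128u
    #define IBODY_HEADER_SIZE     4u  /* sizeof(struct ext4_xattr_ibody_header) */
    #define XATTR_PAD             4u

    static bool inode_has_xattr_space(unsigned inode_size, unsigned extra_isize)
    {
        return extra_isize != 0 &&
               GOOD_OLD_INODE_SIZE + extra_isize + IBODY_HEADER_SIZE + XATTR_PAD
                   <= inode_size;
    }

    int main(void)
    {
        /* 256-byte inode, default 32-byte extra_isize: room for xattrs. */
        printf("%d\n", inode_has_xattr_space(256, 32));   /* 1 */
        /* Pathological extra_isize filling the whole inode: no room. */
        printf("%d\n", inode_has_xattr_space(256, 128));  /* 0 */
        return 0;
    }
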
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2020-36516.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2020-36516.patch
new file mode 100644
index 000000000..dd44c9ce7
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2020-36516.patch
@@ -0,0 +1,62 @@
+From 23f57406b82de51809d5812afd96f210f8b627f3 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 26 Jan 2022 17:10:22 -0800
+Subject: [PATCH] ipv4: avoid using shared IP generator for connected sockets
+
+ip_select_ident_segs() has been very conservative about using
+the connected socket private generator only for packets with IP_DF
+set, claiming it was needed for some VJ compression implementations.
+
+As mentioned in this referenced document, this can be abused.
+(Ref: Off-Path TCP Exploits of the Mixed IPID Assignment)
+
+Before switching to pure random IPID generation and possibly hurting
+some workloads, let's use the private inet socket generator.
+
+Not only will this remove one vulnerability, it will also improve
+performance of TCP flows using pmtudisc==IP_PMTUDISC_DONT.
+
+Fixes: 73f156a6e8c1 ("inetpeer: get rid of ip_id_count")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Reported-by: Ray Che <xijiache@gmail.com>
+Cc: Willy Tarreau <w@1wt.eu>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+ include/net/ip.h | 21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 81e23a102a0d5e..b51bae43b0ddb0 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -525,19 +525,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
+ {
+ struct iphdr *iph = ip_hdr(skb);
+
++ /* We had many attacks based on IPID, use the private
++ * generator as much as we can.
++ */
++ if (sk && inet_sk(sk)->inet_daddr) {
++ iph->id = htons(inet_sk(sk)->inet_id);
++ inet_sk(sk)->inet_id += segs;
++ return;
++ }
+ if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
+- /* This is only to work around buggy Windows95/2000
+- * VJ compression implementations. If the ID field
+- * does not change, they drop every other packet in
+- * a TCP stream using header compression.
+- */
+- if (sk && inet_sk(sk)->inet_daddr) {
+- iph->id = htons(inet_sk(sk)->inet_id);
+- inet_sk(sk)->inet_id += segs;
+- } else {
+- iph->id = 0;
+- }
++ iph->id = 0;
+ } else {
++ /* Unfortunately we need the big hammer to get a suitable IPID */
+ __ip_select_ident(net, iph, segs);
+ }
+ }
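
A small standalone model of the selection policy after this patch (a sketch,
not kernel code; the shared generator is a stand-in value):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct model_sock { uint32_t daddr; uint16_t inet_id; };

    static uint16_t shared_generator(void) { return 0x1234; /* stand-in */ }

    static uint16_t select_ip_id(struct model_sock *sk, bool df, int segs)
    {
        if (sk && sk->daddr) {      /* connected: private per-socket counter */
            uint16_t id = sk->inet_id;
            sk->inet_id += segs;
            return id;
        }
        if (df)                     /* DF set, no connected socket: constant 0 */
            return 0;
        return shared_generator(); /* the "big hammer" fallback */
    }

    int main(void)
    {
        struct model_sock s = { .daddr = 1, .inet_id = 100 };

        printf("%u\n", select_ip_id(&s, true, 1));    /* 100 */
        printf("%u\n", select_ip_id(&s, true, 1));    /* 101 */
        printf("%u\n", select_ip_id(NULL, true, 1));  /* 0 */
        printf("%u\n", select_ip_id(NULL, false, 1)); /* shared generator */
        return 0;
    }
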
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-2978.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-2978.patch
new file mode 100644
index 000000000..c43941f9f
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-2978.patch
@@ -0,0 +1,62 @@
+From 2a96b532098284ecf8e4849b8b9e5fc7a28bdee9 Mon Sep 17 00:00:00 2001
+From: Dongliang Mu <mudongliangabcd@gmail.com>
+Date: Tue, 16 Aug 2022 12:08:58 +0800
+Subject: [PATCH] fs: fix UAF/GPF bug in nilfs_mdt_destroy
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+commit 2e488f13755ffbb60f307e991b27024716a33b29 upstream.
+
+In alloc_inode, inode_init_always() could return -ENOMEM if
+security_inode_alloc() fails, which leaves inode->i_private
+uninitialized. Then nilfs_is_metadata_file_inode() returns
+true and nilfs_free_inode() wrongly calls nilfs_mdt_destroy(),
+which frees the uninitialized inode->i_private
+and leads to crashes (e.g., UAF/GPF).
+
+Fix this by moving security_inode_alloc() just prior to
+this_cpu_inc(nr_inodes).
+
+Link: https://lkml.kernel.org/r/CAFcO6XOcf1Jj2SeGt=jJV59wmhESeSKpfR0omdFRq+J9nD1vfQ@mail.gmail.com
+Reported-by: butt3rflyh4ck <butterflyhuangxx@gmail.com>
+Reported-by: Hao Sun <sunhao.th@gmail.com>
+Reported-by: Jiacheng Xu <stitch@zju.edu.cn>
+Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>
+Signed-off-by: Dongliang Mu <mudongliangabcd@gmail.com>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: stable@vger.kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/inode.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/fs/inode.c b/fs/inode.c
+index ba1de23c13c1ed..b608528efd3a46 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -192,8 +192,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
+ inode->i_wb_frn_history = 0;
+ #endif
+
+- if (security_inode_alloc(inode))
+- goto out;
+ spin_lock_init(&inode->i_lock);
+ lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);
+
+@@ -228,11 +226,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
+ inode->i_fsnotify_mask = 0;
+ #endif
+ inode->i_flctx = NULL;
++
++ if (unlikely(security_inode_alloc(inode)))
++ return -ENOMEM;
+ this_cpu_inc(nr_inodes);
+
+ return 0;
+-out:
+- return -ENOMEM;
+ }
+ EXPORT_SYMBOL(inode_init_always);
+
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-3543.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-3543.patch
new file mode 100644
index 000000000..9d83b59af
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-3543.patch
@@ -0,0 +1,97 @@
+From 2f415ad33bc1a729fb1050141921b5a9ec4e062c Mon Sep 17 00:00:00 2001
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+Date: Thu, 29 Sep 2022 08:52:04 -0700
+Subject: [PATCH] af_unix: Fix memory leaks of the whole sk due to OOB skb.
+
+[ Upstream commit 7a62ed61367b8fd01bae1e18e30602c25060d824 ]
+
+syzbot reported a sequence of memory leaks, and one of them indicated we
+failed to free a whole sk:
+
+ unreferenced object 0xffff8880126e0000 (size 1088):
+ comm "syz-executor419", pid 326, jiffies 4294773607 (age 12.609s)
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 7d 00 00 00 00 00 00 00 ........}.......
+ 01 00 07 40 00 00 00 00 00 00 00 00 00 00 00 00 ...@............
+ backtrace:
+ [<000000006fefe750>] sk_prot_alloc+0x64/0x2a0 net/core/sock.c:1970
+ [<0000000074006db5>] sk_alloc+0x3b/0x800 net/core/sock.c:2029
+ [<00000000728cd434>] unix_create1+0xaf/0x920 net/unix/af_unix.c:928
+ [<00000000a279a139>] unix_create+0x113/0x1d0 net/unix/af_unix.c:997
+ [<0000000068259812>] __sock_create+0x2ab/0x550 net/socket.c:1516
+ [<00000000da1521e1>] sock_create net/socket.c:1566 [inline]
+ [<00000000da1521e1>] __sys_socketpair+0x1a8/0x550 net/socket.c:1698
+ [<000000007ab259e1>] __do_sys_socketpair net/socket.c:1751 [inline]
+ [<000000007ab259e1>] __se_sys_socketpair net/socket.c:1748 [inline]
+ [<000000007ab259e1>] __x64_sys_socketpair+0x97/0x100 net/socket.c:1748
+ [<000000007dedddc1>] do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ [<000000007dedddc1>] do_syscall_64+0x38/0x90 arch/x86/entry/common.c:80
+ [<000000009456679f>] entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+We can reproduce this issue by creating two AF_UNIX SOCK_STREAM sockets,
+send()ing an OOB skb to each other, and close()ing them without consuming
+the OOB skbs.
+
+ int skpair[2];
+
+ socketpair(AF_UNIX, SOCK_STREAM, 0, skpair);
+
+ send(skpair[0], "x", 1, MSG_OOB);
+ send(skpair[1], "x", 1, MSG_OOB);
+
+ close(skpair[0]);
+ close(skpair[1]);
+
+Currently, we free an OOB skb in unix_sock_destructor() which is called via
+__sk_free(), but it's too late because the receiver's unix_sk(sk)->oob_skb
+is accounted against the sender's sk->sk_wmem_alloc and __sk_free() is
+called only when sk->sk_wmem_alloc is 0.
+
+In the repro sequence, we do not consume the OOB skb, so neither sk's
+sock_put() ever reaches __sk_free() due to the positive
+sk->sk_wmem_alloc. Then, no one can consume the OOB skb or call
+__sk_free(), and we finally leak both whole sks.
+
+Thus, we must free the unconsumed OOB skb earlier when close()ing the
+socket.
+
+Fixes: 314001f0bf92 ("af_unix: Add OOB support")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index bf338b782fc4c4..d686804119c991 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -569,12 +569,6 @@ static void unix_sock_destructor(struct sock *sk)
+
+ skb_queue_purge(&sk->sk_receive_queue);
+
+-#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
+- if (u->oob_skb) {
+- kfree_skb(u->oob_skb);
+- u->oob_skb = NULL;
+- }
+-#endif
+ WARN_ON(refcount_read(&sk->sk_wmem_alloc));
+ WARN_ON(!sk_unhashed(sk));
+ WARN_ON(sk->sk_socket);
+@@ -620,6 +614,13 @@ static void unix_release_sock(struct sock *sk, int embrion)
+
+ unix_state_unlock(sk);
+
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++ if (u->oob_skb) {
++ kfree_skb(u->oob_skb);
++ u->oob_skb = NULL;
++ }
++#endif
++
+ wake_up_interruptible_all(&u->peer_wait);
+
+ if (skpair != NULL) {
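
The commit's reproducer, fleshed out into a self-contained program (error
handling on send() is omitted, as in the original snippet):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int skpair[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, skpair)) {
            perror("socketpair");
            return 1;
        }

        /* Queue one unconsumed OOB byte in each direction... */
        send(skpair[0], "x", 1, MSG_OOB);
        send(skpair[1], "x", 1, MSG_OOB);

        /* ...then close both ends without reading. Before the fix, each
         * oob_skb pinned the peer's sk_wmem_alloc, so neither sk was freed. */
        close(skpair[0]);
        close(skpair[1]);
        return 0;
    }
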
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-3623.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-3623.patch
new file mode 100644
index 000000000..d44f786de
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-3623.patch
@@ -0,0 +1,175 @@
+From fac35ba763ed07ba93154c95ffc0c4a55023707f Mon Sep 17 00:00:00 2001
+From: Baolin Wang <baolin.wang@linux.alibaba.com>
+Date: Thu, 1 Sep 2022 18:41:31 +0800
+Subject: mm/hugetlb: fix races when looking up a CONT-PTE/PMD size hugetlb
+ page
+
+Some architectures (like ARM64) support CONT-PTE/PMD size hugetlb,
+which means they support not only PMD/PUD size hugetlb (2M and 1G), but
+also CONT-PTE/PMD sizes (64K and 32M) if a 4K page size is specified.
+
+So when looking up a CONT-PTE size hugetlb page by follow_page(), it will
+use pte_offset_map_lock() to get the pte entry lock for the CONT-PTE size
+hugetlb in follow_page_pte(). However this pte entry lock is incorrect
+for the CONT-PTE size hugetlb, since we should use huge_pte_lock() to get
+the correct lock, which is mm->page_table_lock.
+
+That means the pte entry of the CONT-PTE size hugetlb under current pte
+lock is unstable in follow_page_pte(), we can continue to migrate or
+poison the pte entry of the CONT-PTE size hugetlb, which can cause some
+potential race issues, even though they are under the 'pte lock'.
+
+For example, suppose thread A is trying to look up a CONT-PTE size
+hugetlb page via the move_pages() syscall under the lock; however, another
+thread B can migrate the CONT-PTE hugetlb page at the same time, which
+will cause thread A to get an incorrect page. If thread A also wants to
+do page migration, then a data inconsistency error occurs.
+
+Moreover we have the same issue for CONT-PMD size hugetlb in
+follow_huge_pmd().
+
+To fix above issues, rename the follow_huge_pmd() as follow_huge_pmd_pte()
+to handle PMD and PTE level size hugetlb, which uses huge_pte_lock() to
+get the correct pte entry lock to make the pte entry stable.
+
+Mike said:
+
+Support for CONT_PMD/_PTE was added with bb9dd3df8ee9 ("arm64: hugetlb:
+refactor find_num_contig()"), in the patch series "Support for contiguous
+pte hugepages", v4. However, I do not believe these code paths were
+executed until migration support was added with 5480280d3f2d ("arm64/mm:
+enable HugeTLB migration for contiguous bit HugeTLB pages"), so I would
+go with 5480280d3f2d for the Fixes: target.
+
+Link: https://lkml.kernel.org/r/635f43bdd85ac2615a58405da82b4d33c6e5eb05.1662017562.git.baolin.wang@linux.alibaba.com
+Fixes: 5480280d3f2d ("arm64/mm: enable HugeTLB migration for contiguous bit HugeTLB pages")
+Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Suggested-by: Mike Kravetz <mike.kravetz@oracle.com>
+Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+ include/linux/hugetlb.h | 8 ++++----
+ mm/gup.c | 14 +++++++++++++-
+ mm/hugetlb.c | 27 +++++++++++++--------------
+ 3 files changed, 30 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 3ec981a0d8b3a..67c88b82fc32d 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -207,8 +207,8 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd,
+ int flags, int pdshift);
+-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmd, int flags);
++struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
++ int flags);
+ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int flags);
+ struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+@@ -312,8 +312,8 @@ static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
+ return NULL;
+ }
+
+-static inline struct page *follow_huge_pmd(struct mm_struct *mm,
+- unsigned long address, pmd_t *pmd, int flags)
++static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
++ unsigned long address, int flags)
+ {
+ return NULL;
+ }
+diff --git a/mm/gup.c b/mm/gup.c
+index 00926abb44263..251cb6a10bc0d 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -530,6 +530,18 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
+ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+ (FOLL_PIN | FOLL_GET)))
+ return ERR_PTR(-EINVAL);
++
++ /*
++ * Considering PTE level hugetlb, like continuous-PTE hugetlb on
++ * ARM64 architecture.
++ */
++ if (is_vm_hugetlb_page(vma)) {
++ page = follow_huge_pmd_pte(vma, address, flags);
++ if (page)
++ return page;
++ return no_page_table(vma, flags);
++ }
++
+ retry:
+ if (unlikely(pmd_bad(*pmd)))
+ return no_page_table(vma, flags);
+@@ -662,7 +674,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
+ if (pmd_none(pmdval))
+ return no_page_table(vma, flags);
+ if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
+- page = follow_huge_pmd(mm, address, pmd, flags);
++ page = follow_huge_pmd_pte(vma, address, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 0bdfc7e1c933f..9564bf817e6a8 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -6946,12 +6946,13 @@ follow_huge_pd(struct vm_area_struct *vma,
+ }
+
+ struct page * __weak
+-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+- pmd_t *pmd, int flags)
++follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
+ {
++ struct hstate *h = hstate_vma(vma);
++ struct mm_struct *mm = vma->vm_mm;
+ struct page *page = NULL;
+ spinlock_t *ptl;
+- pte_t pte;
++ pte_t *ptep, pte;
+
+ /* FOLL_GET and FOLL_PIN are mutually exclusive. */
+ if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
+@@ -6961,17 +6962,15 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ return NULL;
+
+ retry:
+- ptl = pmd_lockptr(mm, pmd);
+- spin_lock(ptl);
+- /*
+- * make sure that the address range covered by this pmd is not
+- * unmapped from other threads.
+- */
+- if (!pmd_huge(*pmd))
+- goto out;
+- pte = huge_ptep_get((pte_t *)pmd);
++ ptep = huge_pte_offset(mm, address, huge_page_size(h));
++ if (!ptep)
++ return NULL;
++
++ ptl = huge_pte_lock(h, mm, ptep);
++ pte = huge_ptep_get(ptep);
+ if (pte_present(pte)) {
+- page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
++ page = pte_page(pte) +
++ ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
+ /*
+ * try_grab_page() should always succeed here, because: a) we
+ * hold the pmd (ptl) lock, and b) we've just checked that the
+@@ -6987,7 +6986,7 @@ retry:
+ } else {
+ if (is_hugetlb_entry_migration(pte)) {
+ spin_unlock(ptl);
+- __migration_entry_wait(mm, (pte_t *)pmd, ptl);
++ __migration_entry_wait_huge(ptep, ptl);
+ goto retry;
+ }
+ /*
+--
+cgit
+
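
A hedged sketch of the lookup path the commit describes: move_pages() in
query mode (nodes == NULL) resolves each address through follow_page() and
only reports the backing node. This shows the API shape; actually racing a
CONT-PTE hugetlb against migration would need an ARM64 setup. Link with
-lnuma.

    #include <numaif.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, (size_t)psz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        p[0] = 1;                 /* fault the page in */

        void *pages[1] = { p };
        int status[1];
        /* nodes == NULL: query-only; the kernel walks follow_page() for
         * each address and writes the node number into status[]. */
        if (move_pages(0, 1, pages, NULL, status, 0))
            perror("move_pages");
        else
            printf("page resides on node %d\n", status[0]);
        return 0;
    }
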
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-42703.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-42703.patch
new file mode 100644
index 000000000..059081ce9
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-42703.patch
@@ -0,0 +1,169 @@
+From 2555283eb40df89945557273121e9393ef9b542b Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Wed, 31 Aug 2022 19:06:00 +0200
+Subject: mm/rmap: Fix anon_vma->degree ambiguity leading to double-reuse
+
+anon_vma->degree tracks the combined number of child anon_vmas and VMAs
+that use the anon_vma as their ->anon_vma.
+
+anon_vma_clone() then assumes that for any anon_vma attached to
+src->anon_vma_chain other than src->anon_vma, it is impossible for it to
+be a leaf node of the VMA tree, so for such anon_vmas ->degree is
+elevated by 1 because of a child anon_vma, and therefore if ->degree
+equals 1 there are no VMAs that use the anon_vma as their ->anon_vma.
+
+This assumption is wrong because the ->degree optimization leads to leaf
+nodes being abandoned on anon_vma_clone() - an existing anon_vma is
+reused and no new parent-child relationship is created. So it is
+possible to reuse an anon_vma for one VMA while it is still tied to
+another VMA.
+
+This is an issue because is_mergeable_anon_vma() and its callers assume
+that if two VMAs have the same ->anon_vma, the list of anon_vmas
+attached to the VMAs is guaranteed to be the same. When this assumption
+is violated, vma_merge() can merge pages into a VMA that is not attached
+to the corresponding anon_vma, leading to dangling page->mapping
+pointers that will be dereferenced during rmap walks.
+
+Fix it by separately tracking the number of child anon_vmas and the
+number of VMAs using the anon_vma as their ->anon_vma.
+
+Fixes: 7a3ef208e662 ("mm: prevent endless growth of anon_vma hierarchy")
+Cc: stable@kernel.org
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ include/linux/rmap.h | 7 +++++--
+ mm/rmap.c | 29 ++++++++++++++++-------------
+ 2 files changed, 21 insertions(+), 15 deletions(-)
+
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index bf80adca980b9..b89b4b86951f8 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -41,12 +41,15 @@ struct anon_vma {
+ atomic_t refcount;
+
+ /*
+- * Count of child anon_vmas and VMAs which points to this anon_vma.
++ * Count of child anon_vmas. Equals to the count of all anon_vmas that
++ * have ->parent pointing to this one, including itself.
+ *
+ * This counter is used for making decision about reusing anon_vma
+ * instead of forking new one. See comments in function anon_vma_clone.
+ */
+- unsigned degree;
++ unsigned long num_children;
++ /* Count of VMAs whose ->anon_vma pointer points to this object. */
++ unsigned long num_active_vmas;
+
+ struct anon_vma *parent; /* Parent of this anon_vma */
+
+diff --git a/mm/rmap.c b/mm/rmap.c
+index edc06c52bc82e..93d5a6f793d20 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -93,7 +93,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
+ anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
+ if (anon_vma) {
+ atomic_set(&anon_vma->refcount, 1);
+- anon_vma->degree = 1; /* Reference for first vma */
++ anon_vma->num_children = 0;
++ anon_vma->num_active_vmas = 0;
+ anon_vma->parent = anon_vma;
+ /*
+ * Initialise the anon_vma root to point to itself. If called
+@@ -201,6 +202,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
+ anon_vma = anon_vma_alloc();
+ if (unlikely(!anon_vma))
+ goto out_enomem_free_avc;
++ anon_vma->num_children++; /* self-parent link for new root */
+ allocated = anon_vma;
+ }
+
+@@ -210,8 +212,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
+ if (likely(!vma->anon_vma)) {
+ vma->anon_vma = anon_vma;
+ anon_vma_chain_link(vma, avc, anon_vma);
+- /* vma reference or self-parent link for new root */
+- anon_vma->degree++;
++ anon_vma->num_active_vmas++;
+ allocated = NULL;
+ avc = NULL;
+ }
+@@ -296,19 +297,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+ anon_vma_chain_link(dst, avc, anon_vma);
+
+ /*
+- * Reuse existing anon_vma if its degree lower than two,
+- * that means it has no vma and only one anon_vma child.
++ * Reuse existing anon_vma if it has no vma and only one
++ * anon_vma child.
+ *
+- * Do not chose parent anon_vma, otherwise first child
+- * will always reuse it. Root anon_vma is never reused:
++ * Root anon_vma is never reused:
+ * it has self-parent reference and at least one child.
+ */
+ if (!dst->anon_vma && src->anon_vma &&
+- anon_vma != src->anon_vma && anon_vma->degree < 2)
++ anon_vma->num_children < 2 &&
++ anon_vma->num_active_vmas == 0)
+ dst->anon_vma = anon_vma;
+ }
+ if (dst->anon_vma)
+- dst->anon_vma->degree++;
++ dst->anon_vma->num_active_vmas++;
+ unlock_anon_vma_root(root);
+ return 0;
+
+@@ -358,6 +359,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ anon_vma = anon_vma_alloc();
+ if (!anon_vma)
+ goto out_error;
++ anon_vma->num_active_vmas++;
+ avc = anon_vma_chain_alloc(GFP_KERNEL);
+ if (!avc)
+ goto out_error_free_anon_vma;
+@@ -378,7 +380,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ vma->anon_vma = anon_vma;
+ anon_vma_lock_write(anon_vma);
+ anon_vma_chain_link(vma, avc, anon_vma);
+- anon_vma->parent->degree++;
++ anon_vma->parent->num_children++;
+ anon_vma_unlock_write(anon_vma);
+
+ return 0;
+@@ -410,7 +412,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+ * to free them outside the lock.
+ */
+ if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
+- anon_vma->parent->degree--;
++ anon_vma->parent->num_children--;
+ continue;
+ }
+
+@@ -418,7 +420,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+ anon_vma_chain_free(avc);
+ }
+ if (vma->anon_vma) {
+- vma->anon_vma->degree--;
++ vma->anon_vma->num_active_vmas--;
+
+ /*
+ * vma would still be needed after unlink, and anon_vma will be prepared
+@@ -436,7 +438,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+ struct anon_vma *anon_vma = avc->anon_vma;
+
+- VM_WARN_ON(anon_vma->degree);
++ VM_WARN_ON(anon_vma->num_children);
++ VM_WARN_ON(anon_vma->num_active_vmas);
+ put_anon_vma(anon_vma);
+
+ list_del(&avc->same_vma);
+--
+cgit
+
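
A toy model of why one combined counter is ambiguous: a leaf anon_vma that
still has an active VMA user is indistinguishable from one that merely has a
child, so the old "degree < 2" reuse test can pick an anon_vma that is still
tied to another VMA. The reuse conditions mirror the patch; the rest is
illustration.

    #include <stdio.h>

    struct anon_vma_model {
        unsigned long num_children;    /* anon_vmas with ->parent == this */
        unsigned long num_active_vmas; /* VMAs with ->anon_vma == this */
    };

    int main(void)
    {
        /* A leaf anon_vma that one VMA still uses: */
        struct anon_vma_model av = { .num_children = 0, .num_active_vmas = 1 };
        unsigned long degree = av.num_children + av.num_active_vmas;

        printf("old test (degree < 2): %s\n",
               degree < 2 ? "reuse (WRONG: still in use)" : "skip");
        printf("new test: %s\n",
               (av.num_children < 2 && av.num_active_vmas == 0)
                   ? "reuse" : "skip (correct)");
        return 0;
    }
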
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-4378-1.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-4378-1.patch
new file mode 100644
index 000000000..15fea2b32
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-4378-1.patch
@@ -0,0 +1,107 @@
+From fdf2c95f28bf197bfab421d21e8c697d4f149ea1 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 5 Dec 2022 12:09:06 -0800
+Subject: [PATCH] proc: proc_skip_spaces() shouldn't think it is working on C
+ strings
+
+commit bce9332220bd677d83b19d21502776ad555a0e73 upstream.
+
+proc_skip_spaces() seems to think it is working on C strings, and ends
+up being just a wrapper around skip_spaces() with a really odd calling
+convention.
+
+Instead of basing it on skip_spaces(), it should have looked more like
+proc_skip_char(), which really is the exact same function (except it
+skips a particular character, rather than whitespace). So use that as
+inspiration, odd coding and all.
+
+Now the calling convention actually makes sense and works for the
+intended purpose.
+
+Reported-and-tested-by: Kyle Zeng <zengyhkyle@gmail.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sysctl.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 25b44424d652cf..e9a3094c52e57a 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -265,13 +265,14 @@ int proc_dostring(struct ctl_table *table, int write,
+ ppos);
+ }
+
+-static size_t proc_skip_spaces(char **buf)
++static void proc_skip_spaces(char **buf, size_t *size)
+ {
+- size_t ret;
+- char *tmp = skip_spaces(*buf);
+- ret = tmp - *buf;
+- *buf = tmp;
+- return ret;
++ while (*size) {
++ if (!isspace(**buf))
++ break;
++ (*size)--;
++ (*buf)++;
++ }
+ }
+
+ static void proc_skip_char(char **buf, size_t *size, const char v)
+@@ -518,7 +519,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
+ bool neg;
+
+ if (write) {
+- left -= proc_skip_spaces(&p);
++ proc_skip_spaces(&p, &left);
+
+ if (!left)
+ break;
+@@ -545,7 +546,7 @@ static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
+ if (!write && !first && left && !err)
+ proc_put_char(&buffer, &left, '\n');
+ if (write && !err && left)
+- left -= proc_skip_spaces(&p);
++ proc_skip_spaces(&p, &left);
+ if (write && first)
+ return err ? : -EINVAL;
+ *lenp -= left;
+@@ -587,7 +588,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
+ if (left > PAGE_SIZE - 1)
+ left = PAGE_SIZE - 1;
+
+- left -= proc_skip_spaces(&p);
++ proc_skip_spaces(&p, &left);
+ if (!left) {
+ err = -EINVAL;
+ goto out_free;
+@@ -607,7 +608,7 @@ static int do_proc_douintvec_w(unsigned int *tbl_data,
+ }
+
+ if (!err && left)
+- left -= proc_skip_spaces(&p);
++ proc_skip_spaces(&p, &left);
+
+ out_free:
+ if (err)
+@@ -1072,7 +1073,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
+ if (write) {
+ bool neg;
+
+- left -= proc_skip_spaces(&p);
++ proc_skip_spaces(&p, &left);
+ if (!left)
+ break;
+
+@@ -1101,7 +1102,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
+ if (!write && !first && left && !err)
+ proc_put_char(&buffer, &left, '\n');
+ if (write && !err)
+- left -= proc_skip_spaces(&p);
++ proc_skip_spaces(&p, &left);
+ if (write && first)
+ return err ? : -EINVAL;
+ *lenp -= left;
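
A userspace rendering of the patched helper's logic: whitespace is consumed
only while a byte count remains, so the buffer is never assumed to be
NUL-terminated (a sketch mirroring the kernel function, not the function
itself):

    #include <ctype.h>
    #include <stdio.h>

    static void skip_spaces_bounded(char **buf, size_t *size)
    {
        while (*size) {
            if (!isspace((unsigned char)**buf))
                break;
            (*size)--;
            (*buf)++;
        }
    }

    int main(void)
    {
        char data[] = { ' ', '\t', '4', '2' };  /* deliberately no NUL */
        char *p = data;
        size_t left = sizeof(data);

        skip_spaces_bounded(&p, &left);
        printf("%zu bytes left, next char '%c'\n", left, *p); /* 2, '4' */
        return 0;
    }
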
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-4378-2.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-4378-2.patch
new file mode 100644
index 000000000..b11e065c8
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2022-4378-2.patch
@@ -0,0 +1,40 @@
+From e04220518841708f68e7746232e3e54daef464a3 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Mon, 5 Dec 2022 11:33:40 -0800
+Subject: [PATCH] proc: avoid integer type confusion in get_proc_long
+
+commit e6cfaf34be9fcd1a8285a294e18986bfc41a409c upstream.
+
+proc_get_long() is passed a size_t, but then assigns it to an 'int'
+variable for the length. Let's not do that, even if our IO paths are
+limited to MAX_RW_COUNT (exactly because of these kinds of type errors).
+
+So do the proper test in the right type.
+
+Reported-by: Kyle Zeng <zengyhkyle@gmail.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sysctl.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/sysctl.c b/kernel/sysctl.c
+index 205d605cacc5bb..25b44424d652cf 100644
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -340,13 +340,12 @@ static int proc_get_long(char **buf, size_t *size,
+ unsigned long *val, bool *neg,
+ const char *perm_tr, unsigned perm_tr_len, char *tr)
+ {
+- int len;
+ char *p, tmp[TMPBUFLEN];
++ ssize_t len = *size;
+
+- if (!*size)
++ if (len <= 0)
+ return -EINVAL;
+
+- len = *size;
+ if (len > TMPBUFLEN - 1)
+ len = TMPBUFLEN - 1;
+
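
A short demonstration of the type confusion being fixed: a size_t above
INT_MAX changes meaning when squeezed into an int, whereas on LP64 systems
ssize_t keeps the full width and permits a safe "<= 0" test:

    #include <limits.h>
    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
        size_t size = (size_t)INT_MAX + 2;  /* larger than any int */
        int len = (int)size;         /* value changes: typically negative */
        ssize_t slen = (ssize_t)size;

        printf("size=%zu len=%d slen=%zd\n", size, len, slen);
        /* The fix follows the ssize_t route: test 'len <= 0' in the wide
         * signed type instead of funneling a size_t through an int. */
        return 0;
    }
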
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-0394.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-0394.patch
new file mode 100644
index 000000000..25ffd9af5
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-0394.patch
@@ -0,0 +1,43 @@
+From cb3e9864cdbe35ff6378966660edbcbac955fe17 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 10 Jan 2023 08:59:06 +0800
+Subject: ipv6: raw: Deduct extension header length in
+ rawv6_push_pending_frames
+
+The total cork length created by ip6_append_data includes extension
+headers, so we must exclude them when comparing it against the
+IPV6_CHECKSUM offset, which does not include extension headers.
+
+Reported-by: Kyle Zeng <zengyhkyle@gmail.com>
+Fixes: 357b40a18b04 ("[IPV6]: IPV6_CHECKSUM socket option can corrupt kernel memory")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/ipv6/raw.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index a06a9f847db5c..ada087b50541a 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -505,6 +505,7 @@ csum_copy_err:
+ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+ struct raw6_sock *rp)
+ {
++ struct ipv6_txoptions *opt;
+ struct sk_buff *skb;
+ int err = 0;
+ int offset;
+@@ -522,6 +523,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
+
+ offset = rp->offset;
+ total_len = inet_sk(sk)->cork.base.length;
++ opt = inet6_sk(sk)->cork.opt;
++ total_len -= opt ? opt->opt_flen : 0;
++
+ if (offset >= total_len - 1) {
+ err = -EINVAL;
+ ip6_flush_pending_frames(sk);
+--
+cgit
+
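
For context, the socket option involved, in a minimal sketch (raw IPv6
sockets need CAP_NET_RAW; protocol 253 is reserved for experimentation, and
the offset value is arbitrary):

    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = socket(AF_INET6, SOCK_RAW, 253);

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        /* The checksum offset is relative to the payload; as the fix
         * notes, extension headers are not part of that offset space. */
        int offset = 2;
        if (setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM,
                       &offset, sizeof(offset)))
            perror("setsockopt(IPV6_CHECKSUM)");
        close(fd);
        return 0;
    }
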
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1073.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1073.patch
new file mode 100644
index 000000000..fbad7dd75
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1073.patch
@@ -0,0 +1,34 @@
+From b12fece4c64857e5fab4290bf01b2e0317a88456 Mon Sep 17 00:00:00 2001
+From: Pietro Borrello <borrello@diag.uniroma1.it>
+Date: Mon, 16 Jan 2023 11:11:24 +0000
+Subject: [PATCH] HID: check empty report_list in hid_validate_values()
+
+Add a check for empty report_list in hid_validate_values().
+The missing check causes a type confusion when issuing a list_entry()
+on an empty report_list.
+The problem is caused by the assumption that the device must
+have a valid report_list. While this will be true for all normal HID
+devices, a suitably malicious device can violate the assumption.
+
+Fixes: 1b15d2e5b807 ("HID: core: fix validation of report id 0")
+Signed-off-by: Pietro Borrello <borrello@diag.uniroma1.it>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ drivers/hid/hid-core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index bd47628da6be0d..3e1803592bd4a2 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -993,8 +993,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
+ * Validating on id 0 means we should examine the first
+ * report in the list.
+ */
+- report = list_entry(
+- hid->report_enum[type].report_list.next,
++ report = list_first_entry_or_null(
++ &hid->report_enum[type].report_list,
+ struct hid_report, list);
+ } else {
+ report = hid->report_enum[type].report_id_hash[id];
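
The two list idioms side by side, re-created in self-contained userspace form
(the list macros are simplified re-implementations for illustration, not the
kernel's own):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)
    #define list_first_entry_or_null(head, type, member) \
        ((head)->next != (head) ? list_entry((head)->next, type, member) : NULL)

    struct hid_report { int id; struct list_head list; };

    int main(void)
    {
        struct list_head report_list = { &report_list, &report_list };

        /* Old pattern: list_entry() on an empty list "decodes" the head
         * itself as a struct hid_report -- type confusion, never NULL. */
        struct hid_report *bogus =
            list_entry(report_list.next, struct hid_report, list);

        /* Fixed pattern: an empty list yields NULL instead. */
        struct hid_report *safe =
            list_first_entry_or_null(&report_list, struct hid_report, list);

        printf("bogus=%p safe=%p\n", (void *)bogus, (void *)safe);
        return 0;
    }
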
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1077.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1077.patch
new file mode 100644
index 000000000..7bf9fad09
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1077.patch
@@ -0,0 +1,50 @@
+From 71d8dc030a8ebf8401d96872d0ed2d151a4cdbba Mon Sep 17 00:00:00 2001
+From: Anjaliintel-21 <anjali.ray@intel.com>
+Date: Thu, 27 Apr 2023 07:01:53 +0000
+Subject: [PATCH] sched/rt: pick_next_rt_entity(): check list_entry
+
+Commit 326587b84078 ("sched: fix goto retry in pick_next_task_rt()")
+removed any path which could make pick_next_rt_entity() return NULL.
+However, BUG_ON(!rt_se) in _pick_next_task_rt() (the only caller of
+pick_next_rt_entity()) still checks the error condition, which can
+never happen, since list_entry() never returns NULL.
+Remove the BUG_ON() check, and instead emit a warning in the only
+possible error condition here: the queue being empty, which should
+never happen.
+
+Fixes: 326587b84078 ("sched: fix goto retry in pick_next_task_rt()")
+Signed-off-by: Pietro Borrello <borrello@diag.uniroma1.it>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Phil Auld <pauld@redhat.com>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Link: https://lore.kernel.org/r/20230128-list-entry-null-check-sched-v3-1-b1a71bd1ac6b@diag.uniroma1.it
+---
+ kernel/sched/rt.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
+index 3daf42a0f462..c70c328bd89a 100644
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1607,6 +1607,8 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
+ BUG_ON(idx >= MAX_RT_PRIO);
+
+ queue = array->queue + idx;
++ if (SCHED_WARN_ON(list_empty(queue)))
++ return NULL;
+ next = list_entry(queue->next, struct sched_rt_entity, run_list);
+
+ return next;
+@@ -1619,7 +1621,8 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
+
+ do {
+ rt_se = pick_next_rt_entity(rq, rt_rq);
+- BUG_ON(!rt_se);
++ if (unlikely(!rt_se))
++ return NULL;
+ rt_rq = group_rt_rq(rt_se);
+ } while (rt_rq);
+
+--
+2.17.1
+
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1252.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1252.patch
new file mode 100644
index 000000000..89a7f7949
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1252.patch
@@ -0,0 +1,89 @@
+From 9a254403760041528bc8f69fe2f5e1ef86950991 Mon Sep 17 00:00:00 2001
+From: yangerkun <yangerkun@huawei.com>
+Date: Thu, 30 Sep 2021 11:22:28 +0800
+Subject: [PATCH] ovl: fix use after free in struct ovl_aio_req
+
+Example of triggering a use-after-free in an overlay-on-ext4 setup:
+
+aio_read
+ ovl_read_iter
+ vfs_iter_read
+ ext4_file_read_iter
+ ext4_dio_read_iter
+ iomap_dio_rw -> -EIOCBQUEUED
+ /*
+ * Here IO is completed in a separate thread,
+ * ovl_aio_cleanup_handler() frees aio_req which has iocb embedded
+ */
+ file_accessed(iocb->ki_filp); /**BOOM**/
+
+Fix by introducing a refcount in ovl_aio_req, similar to aio_kiocb. This
+guarantees that the iocb is only freed after vfs_read/write_iter() returns
+on the underlying fs.
+
+Fixes: 2406a307ac7d ("ovl: implement async IO routines")
+Signed-off-by: yangerkun <yangerkun@huawei.com>
+Link: https://lore.kernel.org/r/20210930032228.3199690-3-yangerkun@huawei.com/
+Cc: <stable@vger.kernel.org> # v5.6
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+---
+ fs/overlayfs/file.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
+index c88ac571593dc1..44fea16751f1db 100644
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -17,6 +17,7 @@
+
+ struct ovl_aio_req {
+ struct kiocb iocb;
++ refcount_t ref;
+ struct kiocb *orig_iocb;
+ struct fd fd;
+ };
+@@ -252,6 +253,14 @@ static rwf_t ovl_iocb_to_rwf(int ifl)
+ return flags;
+ }
+
++static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
++{
++ if (refcount_dec_and_test(&aio_req->ref)) {
++ fdput(aio_req->fd);
++ kmem_cache_free(ovl_aio_request_cachep, aio_req);
++ }
++}
++
+ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
+ {
+ struct kiocb *iocb = &aio_req->iocb;
+@@ -268,8 +277,7 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
+ }
+
+ orig_iocb->ki_pos = iocb->ki_pos;
+- fdput(aio_req->fd);
+- kmem_cache_free(ovl_aio_request_cachep, aio_req);
++ ovl_aio_put(aio_req);
+ }
+
+ static void ovl_aio_rw_complete(struct kiocb *iocb, long res, long res2)
+@@ -319,7 +327,9 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+ aio_req->orig_iocb = iocb;
+ kiocb_clone(&aio_req->iocb, iocb, real.file);
+ aio_req->iocb.ki_complete = ovl_aio_rw_complete;
++ refcount_set(&aio_req->ref, 2);
+ ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);
++ ovl_aio_put(aio_req);
+ if (ret != -EIOCBQUEUED)
+ ovl_aio_cleanup_handler(aio_req);
+ }
+@@ -390,7 +400,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+ kiocb_clone(&aio_req->iocb, iocb, real.file);
+ aio_req->iocb.ki_flags = ifl;
+ aio_req->iocb.ki_complete = ovl_aio_rw_complete;
++ refcount_set(&aio_req->ref, 2);
+ ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
++ ovl_aio_put(aio_req);
+ if (ret != -EIOCBQUEUED)
+ ovl_aio_cleanup_handler(aio_req);
+ }
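
A userspace model of the two-reference scheme the fix introduces: the
submitting path and the completion handler each hold one reference, and
whichever drops the last one frees the request, so neither path can free it
out from under the other (C11 atomics stand in for refcount_t):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct aio_req { atomic_int ref; };

    static void aio_put(struct aio_req *req)
    {
        if (atomic_fetch_sub(&req->ref, 1) == 1) {
            printf("freeing request\n");
            free(req);
        }
    }

    int main(void)
    {
        struct aio_req *req = malloc(sizeof(*req));

        if (!req)
            return 1;
        atomic_init(&req->ref, 2); /* one for submit, one for completion */
        aio_put(req);              /* submit path returns (-EIOCBQUEUED) */
        aio_put(req);              /* completion handler finishes later */
        return 0;
    }
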
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch
new file mode 100644
index 000000000..c38b93c78
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch
@@ -0,0 +1,228 @@
+From 24d7275ce2791829953ed4e72f68277ceb2571c6 Mon Sep 17 00:00:00 2001
+From: Yang Shi <shy828301@gmail.com>
+Date: Fri, 11 Feb 2022 16:32:26 -0800
+Subject: [PATCH] fs/proc: task_mmu.c: don't read mapcount for migration entry
+
+The syzbot reported the below BUG:
+
+ kernel BUG at include/linux/page-flags.h:785!
+ invalid opcode: 0000 [#1] PREEMPT SMP KASAN
+ CPU: 1 PID: 4392 Comm: syz-executor560 Not tainted 5.16.0-rc6-syzkaller #0
+ Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+ RIP: 0010:PageDoubleMap include/linux/page-flags.h:785 [inline]
+ RIP: 0010:__page_mapcount+0x2d2/0x350 mm/util.c:744
+ Call Trace:
+ page_mapcount include/linux/mm.h:837 [inline]
+ smaps_account+0x470/0xb10 fs/proc/task_mmu.c:466
+ smaps_pte_entry fs/proc/task_mmu.c:538 [inline]
+ smaps_pte_range+0x611/0x1250 fs/proc/task_mmu.c:601
+ walk_pmd_range mm/pagewalk.c:128 [inline]
+ walk_pud_range mm/pagewalk.c:205 [inline]
+ walk_p4d_range mm/pagewalk.c:240 [inline]
+ walk_pgd_range mm/pagewalk.c:277 [inline]
+ __walk_page_range+0xe23/0x1ea0 mm/pagewalk.c:379
+ walk_page_vma+0x277/0x350 mm/pagewalk.c:530
+ smap_gather_stats.part.0+0x148/0x260 fs/proc/task_mmu.c:768
+ smap_gather_stats fs/proc/task_mmu.c:741 [inline]
+ show_smap+0xc6/0x440 fs/proc/task_mmu.c:822
+ seq_read_iter+0xbb0/0x1240 fs/seq_file.c:272
+ seq_read+0x3e0/0x5b0 fs/seq_file.c:162
+ vfs_read+0x1b5/0x600 fs/read_write.c:479
+ ksys_read+0x12d/0x250 fs/read_write.c:619
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+The reproducer was trying to read /proc/$PID/smaps while calling
+MADV_FREE at the same time. MADV_FREE may split THPs if it is called
+on a partial THP. It may trigger the below race:
+
+ CPU A CPU B
+ ----- -----
+ smaps walk: MADV_FREE:
+ page_mapcount()
+ PageCompound()
+ split_huge_page()
+ page = compound_head(page)
+ PageDoubleMap(page)
+
+When PageDoubleMap() is called, this page is no longer a tail page of a
+THP, so the BUG is triggered.
+
+This could be fixed by elevating the refcount of the page before calling
+page_mapcount(), but that would prevent it from counting migration
+entries, and it seems like overkill because the race can only happen
+when the PMD is split, so all PTE entries of tail pages are actually
+migration entries, and smaps_account() does treat migration entries as
+mapcount == 1, as Kirill pointed out.
+
+Add a new parameter to smaps_account() to flag migration entries and
+skip calling page_mapcount() for them. Don't skip getting the mapcount
+for device private entries, since they do track references with mapcount.
+
+Pagemap has a similar issue, although it was not reported. Fixed
+it as well.
+
+[shy828301@gmail.com: v4]
+ Link: https://lkml.kernel.org/r/20220203182641.824731-1-shy828301@gmail.com
+[nathan@kernel.org: avoid unused variable warning in pagemap_pmd_range()]
+ Link: https://lkml.kernel.org/r/20220207171049.1102239-1-nathan@kernel.org
+Link: https://lkml.kernel.org/r/20220120202805.3369-1-shy828301@gmail.com
+Fixes: e9b61f19858a ("thp: reintroduce split_huge_page()")
+Signed-off-by: Yang Shi <shy828301@gmail.com>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reported-by: syzbot+1f52b3a18d5633fa7f82@syzkaller.appspotmail.com
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ fs/proc/task_mmu.c | 40 +++++++++++++++++++++++++++++++---------
+ 1 file changed, 31 insertions(+), 9 deletions(-)
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index cf25be3e0321..3e68cc9e816c 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -430,7 +430,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
+ }
+
+ static void smaps_account(struct mem_size_stats *mss, struct page *page,
+- bool compound, bool young, bool dirty, bool locked)
++ bool compound, bool young, bool dirty, bool locked,
++ bool migration)
+ {
+ int i, nr = compound ? compound_nr(page) : 1;
+ unsigned long size = nr * PAGE_SIZE;
+@@ -457,8 +458,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
+ * page_count(page) == 1 guarantees the page is mapped exactly once.
+ * If any subpage of the compound page mapped with PTE it would elevate
+ * page_count().
++ *
++ * The page_mapcount() is called to get a snapshot of the mapcount.
++ * Without holding the page lock this snapshot can be slightly wrong as
++ * we cannot always read the mapcount atomically. It is not safe to
++ * call page_mapcount() even with PTL held if the page is not mapped,
++ * especially for migration entries. Treat regular migration entries
++ * as mapcount == 1.
+ */
+- if (page_count(page) == 1) {
++ if ((page_count(page) == 1) || migration) {
+ smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
+ locked, true);
+ return;
+@@ -495,6 +503,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
+ struct page *page = NULL;
++ bool migration = false;
+
+ if (pte_present(*pte)) {
+ page = vm_normal_page(vma, addr, *pte);
+@@ -514,8 +523,11 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ } else {
+ mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
+ }
+- } else if (is_pfn_swap_entry(swpent))
++ } else if (is_pfn_swap_entry(swpent)){
++ if (is_migration_entry(swpent))
++ migration = true;
+ page = pfn_swap_entry_to_page(swpent);
++ }
+ } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
+ && pte_none(*pte))) {
+ page = xa_load(&vma->vm_file->f_mapping->i_pages,
+@@ -528,7 +540,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ if (!page)
+ return;
+
+- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
++ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
++ locked, migration);
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -539,6 +552,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
+ struct page *page = NULL;
++ bool migration = false;
+
+ if (pmd_present(*pmd)) {
+ /* FOLL_DUMP will return -EFAULT on huge zero page */
+@@ -546,8 +560,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
+ swp_entry_t entry = pmd_to_swp_entry(*pmd);
+
+- if (is_migration_entry(entry))
++ if (is_migration_entry(entry)) {
++ migration = true;
+ page = pfn_swap_entry_to_page(entry);
++ }
+ }
+ if (IS_ERR_OR_NULL(page))
+ return;
+@@ -559,7 +575,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ /* pass */;
+ else
+ mss->file_thp += HPAGE_PMD_SIZE;
+- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
++
++ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
++ locked, migration);
+ }
+ #else
+ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+@@ -1363,6 +1381,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
+ {
+ u64 frame = 0, flags = 0;
+ struct page *page = NULL;
++ bool migration = false;
+
+ if (pte_present(pte)) {
+ if (pm->show_pfn)
+@@ -1384,13 +1403,14 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
+ frame = swp_type(entry) |
+ (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+ flags |= PM_SWAP;
++ migration = is_migration_entry(entry);
+ if (is_pfn_swap_entry(entry))
+ page = pfn_swap_entry_to_page(entry);
+ }
+
+ if (page && !PageAnon(page))
+ flags |= PM_FILE;
+- if (page && page_mapcount(page) == 1)
++ if (page && !migration && page_mapcount(page) == 1)
+ flags |= PM_MMAP_EXCLUSIVE;
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ flags |= PM_SOFT_DIRTY;
+@@ -1406,8 +1426,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ spinlock_t *ptl;
+ pte_t *pte, *orig_pte;
+ int err = 0;
+-
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ bool migration = false;
++
+ ptl = pmd_trans_huge_lock(pmdp, vma);
+ if (ptl) {
+ u64 flags = 0, frame = 0;
+@@ -1446,11 +1467,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ if (pmd_swp_uffd_wp(pmd))
+ flags |= PM_UFFD_WP;
+ VM_BUG_ON(!is_pmd_migration_entry(pmd));
++ migration = is_migration_entry(entry);
+ page = pfn_swap_entry_to_page(entry);
+ }
+ #endif
+
+- if (page && page_mapcount(page) == 1)
++ if (page && !migration && page_mapcount(page) == 1)
+ flags |= PM_MMAP_EXCLUSIVE;
+
+ for (; addr != end; addr += PAGE_SIZE) {
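
A hedged sketch of one half of the reported race: MADV_FREE on part of a
THP-backed range splits the huge page, and the crash window was a concurrent
/proc/<pid>/smaps walk during that split. This sets up only the MADV_FREE
side and assumes MADV_HUGEPAGE/MADV_FREE are available (kernel 4.5+):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #define SZ (2UL << 20)  /* one PMD-sized THP on x86-64 */

    int main(void)
    {
        char *p = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        madvise(p, SZ, MADV_HUGEPAGE); /* ask for a THP */
        memset(p, 0xaa, SZ);           /* fault it in */

        /* Freeing only half the range forces a THP split: the window in
         * which a concurrent smaps walk used to hit PageDoubleMap(). */
        if (madvise(p, SZ / 2, MADV_FREE))
            perror("madvise(MADV_FREE)");
        return 0;
    }
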
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-2269.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-2269.patch
new file mode 100644
index 000000000..538318fe0
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-2269.patch
@@ -0,0 +1,56 @@
+From 3d32aaa7e66d5c1479a3c31d6c2c5d45dd0d3b89 Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@kernel.org>
+Date: Mon, 17 Apr 2023 11:59:56 -0400
+Subject: [PATCH] dm ioctl: fix nested locking in table_clear() to remove
+ deadlock concern
+
+syzkaller found the following problematic rwsem locking (with write
+lock already held):
+
+ down_read+0x9d/0x450 kernel/locking/rwsem.c:1509
+ dm_get_inactive_table+0x2b/0xc0 drivers/md/dm-ioctl.c:773
+ __dev_status+0x4fd/0x7c0 drivers/md/dm-ioctl.c:844
+ table_clear+0x197/0x280 drivers/md/dm-ioctl.c:1537
+
+In table_clear, it first acquires a write lock
+https://elixir.bootlin.com/linux/v6.2/source/drivers/md/dm-ioctl.c#L1520
+down_write(&_hash_lock);
+
+Then before the lock is released at L1539, there is a path shown above:
+table_clear -> __dev_status -> dm_get_inactive_table -> down_read
+https://elixir.bootlin.com/linux/v6.2/source/drivers/md/dm-ioctl.c#L773
+down_read(&_hash_lock);
+
+It tries to acquire the same read lock again, resulting in the deadlock
+problem.
+
+Fix this by moving table_clear()'s __dev_status() call to after its
+up_write(&_hash_lock);
+
+Cc: stable@vger.kernel.org
+Reported-by: Zheng Zhang <zheng.zhang@email.ucr.edu>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+---
+ drivers/md/dm-ioctl.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
+index 50a1259294d141..7d5c9c582ed2d6 100644
+--- a/drivers/md/dm-ioctl.c
++++ b/drivers/md/dm-ioctl.c
+@@ -1556,11 +1556,12 @@ static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_s
+ has_new_map = true;
+ }
+
+- param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
+-
+- __dev_status(hc->md, param);
+ md = hc->md;
+ up_write(&_hash_lock);
++
++ param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
++ __dev_status(md, param);
++
+ if (old_map) {
+ dm_sync_table(md);
+ dm_table_destroy(old_map);
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-2513.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-2513.patch
new file mode 100644
index 000000000..266fedb1f
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-2513.patch
@@ -0,0 +1,120 @@
+From 67d7d8ad99beccd9fe92d585b87f1760dc9018e3 Mon Sep 17 00:00:00 2001
+From: Baokun Li <libaokun1@huawei.com>
+Date: Thu, 16 Jun 2022 10:13:56 +0800
+Subject: [PATCH] ext4: fix use-after-free in ext4_xattr_set_entry
+
+Hulk Robot reported an issue:
+==================================================================
+BUG: KASAN: use-after-free in ext4_xattr_set_entry+0x18ab/0x3500
+Write of size 4105 at addr ffff8881675ef5f4 by task syz-executor.0/7092
+
+CPU: 1 PID: 7092 Comm: syz-executor.0 Not tainted 4.19.90-dirty #17
+Call Trace:
+[...]
+ memcpy+0x34/0x50 mm/kasan/kasan.c:303
+ ext4_xattr_set_entry+0x18ab/0x3500 fs/ext4/xattr.c:1747
+ ext4_xattr_ibody_inline_set+0x86/0x2a0 fs/ext4/xattr.c:2205
+ ext4_xattr_set_handle+0x940/0x1300 fs/ext4/xattr.c:2386
+ ext4_xattr_set+0x1da/0x300 fs/ext4/xattr.c:2498
+ __vfs_setxattr+0x112/0x170 fs/xattr.c:149
+ __vfs_setxattr_noperm+0x11b/0x2a0 fs/xattr.c:180
+ __vfs_setxattr_locked+0x17b/0x250 fs/xattr.c:238
+ vfs_setxattr+0xed/0x270 fs/xattr.c:255
+ setxattr+0x235/0x330 fs/xattr.c:520
+ path_setxattr+0x176/0x190 fs/xattr.c:539
+ __do_sys_lsetxattr fs/xattr.c:561 [inline]
+ __se_sys_lsetxattr fs/xattr.c:557 [inline]
+ __x64_sys_lsetxattr+0xc2/0x160 fs/xattr.c:557
+ do_syscall_64+0xdf/0x530 arch/x86/entry/common.c:298
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+RIP: 0033:0x459fe9
+RSP: 002b:00007fa5e54b4c08 EFLAGS: 00000246 ORIG_RAX: 00000000000000bd
+RAX: ffffffffffffffda RBX: 000000000051bf60 RCX: 0000000000459fe9
+RDX: 00000000200003c0 RSI: 0000000020000180 RDI: 0000000020000140
+RBP: 000000000051bf60 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000001009 R11: 0000000000000246 R12: 0000000000000000
+R13: 00007ffc73c93fc0 R14: 000000000051bf60 R15: 00007fa5e54b4d80
+[...]
+==================================================================
+
+The above issue may happen as follows:
+-------------------------------------
+ext4_xattr_set
+ ext4_xattr_set_handle
+ ext4_xattr_ibody_find
+ >> s->end < s->base
+ >> no EXT4_STATE_XATTR
+ >> xattr_check_inode is not executed
+ ext4_xattr_ibody_set
+ ext4_xattr_set_entry
+ >> size_t min_offs = s->end - s->base
+ >> UAF in memcpy
+
+We can easily reproduce this problem with the following commands:
+ mkfs.ext4 -F /dev/sda
+ mount -o debug_want_extra_isize=128 /dev/sda /mnt
+ touch /mnt/file
+ setfattr -n user.cat -v `seq -s z 4096|tr -d '[:digit:]'` /mnt/file
+
+In ext4_xattr_ibody_find, we have the following assignment logic:
+ header = IHDR(inode, raw_inode)
+ = raw_inode + EXT4_GOOD_OLD_INODE_SIZE + i_extra_isize
+ is->s.base = IFIRST(header)
+ = header + sizeof(struct ext4_xattr_ibody_header)
+ is->s.end = raw_inode + s_inode_size
+
+In ext4_xattr_set_entry
+ min_offs = s->end - s->base
+ = s_inode_size - EXT4_GOOD_OLD_INODE_SIZE - i_extra_isize -
+ sizeof(struct ext4_xattr_ibody_header)
+ last = s->first
+ free = min_offs - ((void *)last - s->base) - sizeof(__u32)
+ = s_inode_size - EXT4_GOOD_OLD_INODE_SIZE - i_extra_isize -
+ sizeof(struct ext4_xattr_ibody_header) - sizeof(__u32)
+
+In the calculation formula, all values except s_inode_size and
+i_extra_isize are fixed values. When i_extra_isize is the maximum value
+s_inode_size - EXT4_GOOD_OLD_INODE_SIZE, min_offs is -4 and free is -8.
+The value overflows. As a result, the preceding issue is triggered when
+memcpy is executed.
+
+Therefore, when finding or setting an xattr, check whether there is
+space for storing the xattr in the inode to resolve this issue.
+
+Cc: stable@kernel.org
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Baokun Li <libaokun1@huawei.com>
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20220616021358.2504451-3-libaokun1@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+---
+ fs/ext4/xattr.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index 564e28a1aa9428..c42b3e0d2d94b4 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -2175,8 +2175,9 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
+ struct ext4_inode *raw_inode;
+ int error;
+
+- if (EXT4_I(inode)->i_extra_isize == 0)
++ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+ return 0;
++
+ raw_inode = ext4_raw_inode(&is->iloc);
+ header = IHDR(inode, raw_inode);
+ is->s.base = is->s.first = IFIRST(header);
+@@ -2204,8 +2205,9 @@ int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
+ struct ext4_xattr_search *s = &is->s;
+ int error;
+
+- if (EXT4_I(inode)->i_extra_isize == 0)
++ if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
+ return -ENOSPC;
++
+ error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
+ if (error)
+ return error;
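
A standalone illustration of the underflow described above, using the sizes
from the commit message (256-byte inode, 128-byte base inode, 4-byte ibody
header; treat the constants as illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned inode_size = 256, good_old = 128, extra_isize = 128, hdr = 4;

        /* Signed arithmetic shows the real value... */
        long signed_min_offs = (long)inode_size - (long)good_old
                             - (long)extra_isize - (long)hdr;
        /* ...but min_offs is unsigned in the kernel, so -4 wraps huge. */
        size_t min_offs = inode_size - good_old - extra_isize - hdr;

        printf("signed=%ld unsigned=%zu\n", signed_min_offs, min_offs);
        return 0;
    }
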