Diffstat (limited to 'meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch')
-rw-r--r--  meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch  228
1 file changed, 228 insertions, 0 deletions
diff --git a/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch
new file mode 100644
index 000000000..c38b93c78
--- /dev/null
+++ b/meta-openbmc-mods/meta-common/recipes-kernel/linux/linux-aspeed/CVE-2023-1582.patch
@@ -0,0 +1,228 @@
+From 24d7275ce2791829953ed4e72f68277ceb2571c6 Mon Sep 17 00:00:00 2001
+From: Yang Shi <shy828301@gmail.com>
+Date: Fri, 11 Feb 2022 16:32:26 -0800
+Subject: [PATCH] fs/proc: task_mmu.c: don't read mapcount for migration entry
+
+syzbot reported the following BUG:
+
+ kernel BUG at include/linux/page-flags.h:785!
+ invalid opcode: 0000 [#1] PREEMPT SMP KASAN
+ CPU: 1 PID: 4392 Comm: syz-executor560 Not tainted 5.16.0-rc6-syzkaller #0
+ Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+ RIP: 0010:PageDoubleMap include/linux/page-flags.h:785 [inline]
+ RIP: 0010:__page_mapcount+0x2d2/0x350 mm/util.c:744
+ Call Trace:
+ page_mapcount include/linux/mm.h:837 [inline]
+ smaps_account+0x470/0xb10 fs/proc/task_mmu.c:466
+ smaps_pte_entry fs/proc/task_mmu.c:538 [inline]
+ smaps_pte_range+0x611/0x1250 fs/proc/task_mmu.c:601
+ walk_pmd_range mm/pagewalk.c:128 [inline]
+ walk_pud_range mm/pagewalk.c:205 [inline]
+ walk_p4d_range mm/pagewalk.c:240 [inline]
+ walk_pgd_range mm/pagewalk.c:277 [inline]
+ __walk_page_range+0xe23/0x1ea0 mm/pagewalk.c:379
+ walk_page_vma+0x277/0x350 mm/pagewalk.c:530
+ smap_gather_stats.part.0+0x148/0x260 fs/proc/task_mmu.c:768
+ smap_gather_stats fs/proc/task_mmu.c:741 [inline]
+ show_smap+0xc6/0x440 fs/proc/task_mmu.c:822
+ seq_read_iter+0xbb0/0x1240 fs/seq_file.c:272
+ seq_read+0x3e0/0x5b0 fs/seq_file.c:162
+ vfs_read+0x1b5/0x600 fs/read_write.c:479
+ ksys_read+0x12d/0x250 fs/read_write.c:619
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+The reproducer was reading /proc/$PID/smaps while calling MADV_FREE at
+the same time. MADV_FREE may split THPs if it is called on a partial
+THP. This may trigger the race below:
+
+           CPU A                          CPU B
+           -----                          -----
+ smaps walk:                              MADV_FREE:
+  page_mapcount()
+    PageCompound()
+                                          split_huge_page()
+    page = compound_head(page)
+    PageDoubleMap(page)
+
+When PageDoubleMap() is called, the page is no longer a tail page of a
+THP, so the BUG is triggered.
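+
+For illustration, a minimal userspace sketch of this pattern (this is
+not the actual syzkaller reproducer; the mapping size, madvise() range
+and MADV_HUGEPAGE usage are assumptions):
+
+  #include <fcntl.h>
+  #include <pthread.h>
+  #include <string.h>
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  #define LEN (4UL << 20) /* a couple of PMD-sized (2M) THPs on x86-64 */
+
+  /* CPU A: walk our own smaps in a loop (hits page_mapcount()). */
+  static void *smaps_reader(void *arg)
+  {
+          char buf[4096];
+
+          (void)arg;
+          for (;;) {
+                  int fd = open("/proc/self/smaps", O_RDONLY);
+
+                  if (fd < 0)
+                          return NULL;
+                  while (read(fd, buf, sizeof(buf)) > 0)
+                          ;
+                  close(fd);
+          }
+  }
+
+  int main(void)
+  {
+          pthread_t t;
+          char *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
+                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+          if (p == MAP_FAILED)
+                  return 1;
+          madvise(p, LEN, MADV_HUGEPAGE); /* ask for THPs */
+          pthread_create(&t, NULL, smaps_reader, NULL);
+          /* CPU B: fault the THPs in, then MADV_FREE a partial range so
+           * the kernel splits them while the other thread is mid-walk. */
+          for (;;) {
+                  memset(p, 1, LEN);
+                  madvise(p + 4096, LEN - 8192, MADV_FREE);
+          }
+  }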
+
+This could be fixed by elevating the refcount of the page before calling
+page_mapcount(), but that would prevent it from counting migration
+entries, and it seems like overkill because the race can only happen
+when the PMD is split, in which case all PTE entries of the tail pages
+are actually migration entries, and smaps_account() already treats
+migration entries as mapcount == 1, as Kirill pointed out.
+
+Add a new parameter to smaps_account() to indicate that the entry is a
+migration entry, and then skip calling page_mapcount(). Don't skip
+getting the mapcount for device private entries, since they do track
+references with mapcount.
+
+Pagemap has a similar issue, although it was not reported. Fix it as
+well.
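+
+For reference, a sketch of how userspace reads the PM_MMAP_EXCLUSIVE
+bit (bit 56 of a pagemap entry) that both pagemap hunks below now gate
+on !migration (the helper name is made up and error handling is
+trimmed):
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <unistd.h>
+
+  /* Return 1 if the page backing addr is mapped exclusively, 0 if not,
+   * -1 on error. Pagemap holds one 64-bit entry per virtual page. */
+  static int page_exclusive(const void *addr)
+  {
+          uint64_t ent;
+          long psize = sysconf(_SC_PAGESIZE);
+          int fd = open("/proc/self/pagemap", O_RDONLY);
+          int ret = -1;
+
+          if (fd < 0)
+                  return -1;
+          if (pread(fd, &ent, sizeof(ent),
+                    (uintptr_t)addr / psize * sizeof(ent)) == sizeof(ent))
+                  ret = (ent >> 56) & 1; /* PM_MMAP_EXCLUSIVE */
+          close(fd);
+          return ret;
+  }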
+
+[shy828301@gmail.com: v4]
+ Link: https://lkml.kernel.org/r/20220203182641.824731-1-shy828301@gmail.com
+[nathan@kernel.org: avoid unused variable warning in pagemap_pmd_range()]
+ Link: https://lkml.kernel.org/r/20220207171049.1102239-1-nathan@kernel.org
+Link: https://lkml.kernel.org/r/20220120202805.3369-1-shy828301@gmail.com
+Fixes: e9b61f19858a ("thp: reintroduce split_huge_page()")
+Signed-off-by: Yang Shi <shy828301@gmail.com>
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Reported-by: syzbot+1f52b3a18d5633fa7f82@syzkaller.appspotmail.com
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ fs/proc/task_mmu.c | 40 +++++++++++++++++++++++++++++++---------
+ 1 file changed, 31 insertions(+), 9 deletions(-)
+
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index cf25be3e0321..3e68cc9e816c 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -430,7 +430,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
+ }
+
+ static void smaps_account(struct mem_size_stats *mss, struct page *page,
+- bool compound, bool young, bool dirty, bool locked)
++ bool compound, bool young, bool dirty, bool locked,
++ bool migration)
+ {
+ int i, nr = compound ? compound_nr(page) : 1;
+ unsigned long size = nr * PAGE_SIZE;
+@@ -457,8 +458,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
+ * page_count(page) == 1 guarantees the page is mapped exactly once.
+ * If any subpage of the compound page mapped with PTE it would elevate
+ * page_count().
++ *
++ * page_mapcount() is called to get a snapshot of the mapcount.
++ * Without holding the page lock this snapshot can be slightly wrong as
++ * we cannot always read the mapcount atomically. It is not safe to
++ * call page_mapcount() even with PTL held if the page is not mapped,
++ * especially for migration entries. Treat regular migration entries
++ * as mapcount == 1.
+ */
+- if (page_count(page) == 1) {
++ if ((page_count(page) == 1) || migration) {
+ smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
+ locked, true);
+ return;
+@@ -495,6 +503,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
+ struct page *page = NULL;
++ bool migration = false;
+
+ if (pte_present(*pte)) {
+ page = vm_normal_page(vma, addr, *pte);
+@@ -514,8 +523,11 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ } else {
+ mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
+ }
+- } else if (is_pfn_swap_entry(swpent))
++ } else if (is_pfn_swap_entry(swpent)) {
++ if (is_migration_entry(swpent))
++ migration = true;
+ page = pfn_swap_entry_to_page(swpent);
++ }
+ } else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
+ && pte_none(*pte))) {
+ page = xa_load(&vma->vm_file->f_mapping->i_pages,
+@@ -528,7 +540,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ if (!page)
+ return;
+
+- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
++ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
++ locked, migration);
+ }
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+@@ -539,6 +552,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
+ struct page *page = NULL;
++ bool migration = false;
+
+ if (pmd_present(*pmd)) {
+ /* FOLL_DUMP will return -EFAULT on huge zero page */
+@@ -546,8 +560,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
+ swp_entry_t entry = pmd_to_swp_entry(*pmd);
+
+- if (is_migration_entry(entry))
++ if (is_migration_entry(entry)) {
++ migration = true;
+ page = pfn_swap_entry_to_page(entry);
++ }
+ }
+ if (IS_ERR_OR_NULL(page))
+ return;
+@@ -559,7 +575,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ /* pass */;
+ else
+ mss->file_thp += HPAGE_PMD_SIZE;
+- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
++
++ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
++ locked, migration);
+ }
+ #else
+ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+@@ -1363,6 +1381,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
+ {
+ u64 frame = 0, flags = 0;
+ struct page *page = NULL;
++ bool migration = false;
+
+ if (pte_present(pte)) {
+ if (pm->show_pfn)
+@@ -1384,13 +1403,14 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
+ frame = swp_type(entry) |
+ (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
+ flags |= PM_SWAP;
++ migration = is_migration_entry(entry);
+ if (is_pfn_swap_entry(entry))
+ page = pfn_swap_entry_to_page(entry);
+ }
+
+ if (page && !PageAnon(page))
+ flags |= PM_FILE;
+- if (page && page_mapcount(page) == 1)
++ if (page && !migration && page_mapcount(page) == 1)
+ flags |= PM_MMAP_EXCLUSIVE;
+ if (vma->vm_flags & VM_SOFTDIRTY)
+ flags |= PM_SOFT_DIRTY;
+@@ -1406,8 +1426,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ spinlock_t *ptl;
+ pte_t *pte, *orig_pte;
+ int err = 0;
+-
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
++ bool migration = false;
++
+ ptl = pmd_trans_huge_lock(pmdp, vma);
+ if (ptl) {
+ u64 flags = 0, frame = 0;
+@@ -1446,11 +1467,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ if (pmd_swp_uffd_wp(pmd))
+ flags |= PM_UFFD_WP;
+ VM_BUG_ON(!is_pmd_migration_entry(pmd));
++ migration = is_migration_entry(entry);
+ page = pfn_swap_entry_to_page(entry);
+ }
+ #endif
+
+- if (page && page_mapcount(page) == 1)
++ if (page && !migration && page_mapcount(page) == 1)
+ flags |= PM_MMAP_EXCLUSIVE;
+
+ for (; addr != end; addr += PAGE_SIZE) {