From 6f12be792fde994ed934168f93c2a0d2a0cf0bc5 Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Fri, 16 Dec 2022 17:32:27 +0100
Subject: mm, mremap: fix mremap() expanding vma with addr inside vma
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Since 6.1 we have noticed random rpm install failures that were tracked
to mremap() returning -ENOMEM and to commit ca3d76b0aa80 ("mm: add
merging after mremap resize").

The problem occurs when mremap() expands a VMA in place, but using a
starting address that's not vma->vm_start, but somewhere in the middle.
The extension_pgoff calculation introduced by the commit is wrong in
that case, so vma_merge() fails due to pgoffs not being compatible.
Fix the calculation.

Incidentally, it seems the situations where rpm now expands a vma from
the middle were themselves made possible by that commit, thanks to the
improved vma merging. Those expansions should work just fine, except
for the buggy calculation.

Link: https://lkml.kernel.org/r/20221216163227.24648-1-vbabka@suse.cz
Reported-by: Jiri Slaby
Link: https://bugzilla.suse.com/show_bug.cgi?id=1206359
Fixes: ca3d76b0aa80 ("mm: add merging after mremap resize")
Signed-off-by: Vlastimil Babka
Cc: Jakub Matěna
Cc: "Kirill A . Shutemov"
Cc: Liam Howlett
Cc: Matthew Wilcox
Cc: Mel Gorman
Cc: Michal Hocko
Cc:
Signed-off-by: Andrew Morton
---
 mm/mremap.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/mremap.c b/mm/mremap.c
index e465ffe279bb..fe587c5d6591 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1016,7 +1016,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 			long pages = (new_len - old_len) >> PAGE_SHIFT;
 			unsigned long extension_start = addr + old_len;
 			unsigned long extension_end = addr + new_len;
-			pgoff_t extension_pgoff = vma->vm_pgoff + (old_len >> PAGE_SHIFT);
+			pgoff_t extension_pgoff = vma->vm_pgoff +
+				((extension_start - vma->vm_start) >> PAGE_SHIFT);
 
 			if (vma->vm_flags & VM_ACCOUNT) {
 				if (security_vm_enough_memory_mm(mm, pages)) {
--
cgit v1.2.3

From 38ce7c9bdfc228c14d7621ba36d3eebedd9d4f76 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 15 Dec 2022 14:46:21 -0500
Subject: mm/mempolicy: fix memory leak in set_mempolicy_home_node system call

When encountering any vma in the range with policy other than MPOL_BIND
or MPOL_PREFERRED_MANY, an error is returned without calling mpol_put()
on the policy just allocated with mpol_dup(). This allows arbitrary
users to leak kernel memory.

Link: https://lkml.kernel.org/r/20221215194621.202816-1-mathieu.desnoyers@efficios.com
Fixes: c6018b4b2549 ("mm/mempolicy: add set_mempolicy_home_node syscall")
Signed-off-by: Mathieu Desnoyers
Reviewed-by: Randy Dunlap
Reviewed-by: "Huang, Ying"
Reviewed-by: Aneesh Kumar K.V
Acked-by: Michal Hocko
Cc: Aneesh Kumar K.V
Cc: Dave Hansen
Cc: Feng Tang
Cc: Michal Hocko
Cc: Andrea Arcangeli
Cc: Mel Gorman
Cc: Mike Kravetz
Cc: Randy Dunlap
Cc: Vlastimil Babka
Cc: Andi Kleen
Cc: Dan Williams
Cc: Huang Ying
Cc: [5.17+]
Signed-off-by: Andrew Morton
---
 mm/mempolicy.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 61aa9aedb728..02c8a712282f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1540,6 +1540,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
 		 * the home node for vmas we already updated before.
 		 */
 		if (new->mode != MPOL_BIND && new->mode != MPOL_PREFERRED_MANY) {
+			mpol_put(new);
 			err = -EOPNOTSUPP;
 			break;
 		}
--
cgit v1.2.3

From aaa746ad8b30f38ef89a301faf339ef1c19cf33a Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Thu, 15 Dec 2022 17:30:17 +0100
Subject: kmsan: include linux/vmalloc.h

This is needed for the vmap/vunmap declarations:

mm/kmsan/kmsan_test.c:316:9: error: implicit declaration of function 'vmap' is invalid in C99 [-Werror,-Wimplicit-function-declaration]
        vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
               ^
mm/kmsan/kmsan_test.c:316:29: error: use of undeclared identifier 'VM_MAP'
        vbuf = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
                                   ^
mm/kmsan/kmsan_test.c:322:3: error: implicit declaration of function 'vunmap' is invalid in C99 [-Werror,-Wimplicit-function-declaration]
                vunmap(vbuf);
                ^

Link: https://lkml.kernel.org/r/20221215163046.4079767-1-arnd@kernel.org
Fixes: 8ed691b02ade ("kmsan: add tests for KMSAN")
Signed-off-by: Arnd Bergmann
Reviewed-by: Alexander Potapenko
Cc: Dmitry Vyukov
Cc: Marco Elver
Cc:
Signed-off-by: Andrew Morton
---
 mm/kmsan/kmsan_test.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index eb44ef3c5f29..088e21a48dc4 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include <linux/vmalloc.h>
 #include
 
 static DEFINE_PER_CPU(int, per_cpu_var);
--
cgit v1.2.3

From 7ba594d700998bafa96a75360d2e060aa39156d2 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Thu, 15 Dec 2022 17:26:57 +0100
Subject: kmsan: export kmsan_handle_urb

USB support can be in a loadable module, and this causes a link failure
with KMSAN:

ERROR: modpost: "kmsan_handle_urb" [drivers/usb/core/usbcore.ko] undefined!

Export the symbol so it can be used by this module.

Link: https://lkml.kernel.org/r/20221215162710.3802378-1-arnd@kernel.org
Fixes: 553a80188a5d ("kmsan: handle memory sent to/from USB")
Signed-off-by: Arnd Bergmann
Reviewed-by: Alexander Potapenko
Cc: Dmitry Vyukov
Cc: Marco Elver
Cc:
Signed-off-by: Andrew Morton
---
 mm/kmsan/hooks.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 35f6b6e6a908..3807502766a3 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -260,6 +260,7 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out)
 					       urb->transfer_buffer_length,
 					       /*checked*/ false);
 }
+EXPORT_SYMBOL_GPL(kmsan_handle_urb);
 
 static void kmsan_handle_dma_page(const void *addr, size_t size,
 				  enum dma_data_direction dir)
--
cgit v1.2.3

From e700898fa075c69b3ae02b702ab57fb75e1a82ec Mon Sep 17 00:00:00 2001
From: Mike Kravetz
Date: Mon, 12 Dec 2022 15:50:41 -0800
Subject: hugetlb: really allocate vma lock for all sharable vmas

Commit bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
removed the pmd sharable checks in the vma lock helper routines.
However, it left the functional version of the helper routines behind
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE. Therefore, the vma lock is not
being used for sharable vmas on architectures that do not support pmd
sharing. On these architectures, a potential fault/truncation race is
exposed that could leave pages in a hugetlb file past i_size until the
file is removed.

Move the functional vma lock helpers outside the ifdef, and remove the
non-functional stubs. Since the vma lock is not just for pmd sharing,
rename the routine __vma_shareable_flags_pmd.
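For orientation before the large diff below: every helper now keys off a
single predicate, shown here condensed from the patch itself. It checks
only that the vma is sharable and has a lock allocated; nothing in it
depends on pmd sharing, which is why the helpers can live outside the
CONFIG_ARCH_WANT_HUGE_PMD_SHARE ifdef:

	static bool __vma_shareable_lock(struct vm_area_struct *vma)
	{
		return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
			vma->vm_private_data;
	}

	void hugetlb_vma_lock_read(struct vm_area_struct *vma)
	{
		if (__vma_shareable_lock(vma)) {
			struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

			down_read(&vma_lock->rw_sema);
		}
	}

The remaining read/write/trylock/assert helpers in the diff follow the
same pattern.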
Link: https://lkml.kernel.org/r/20221212235042.178355-1-mike.kravetz@oracle.com
Fixes: bbff39cc6cbc ("hugetlb: allocate vma lock for all sharable vmas")
Signed-off-by: Mike Kravetz
Reviewed-by: Miaohe Lin
Cc: "Aneesh Kumar K.V"
Cc: David Hildenbrand
Cc: James Houghton
Cc: Mina Almasry
Cc: Muchun Song
Cc: Naoya Horiguchi
Cc: Peter Xu
Cc:
Signed-off-by: Andrew Morton
---
 mm/hugetlb.c | 333 ++++++++++++++++++++++++++----------------------------
 1 file changed, 148 insertions(+), 185 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 77f36e3681e3..db895230ee7e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -255,6 +255,152 @@ static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
 	return subpool_inode(file_inode(vma->vm_file));
 }
 
+/*
+ * hugetlb vma_lock helper routines
+ */
+static bool __vma_shareable_lock(struct vm_area_struct *vma)
+{
+	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
+		vma->vm_private_data;
+}
+
+void hugetlb_vma_lock_read(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_read(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		up_read(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_lock_write(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_write(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		up_write(&vma_lock->rw_sema);
+	}
+}
+
+int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
+{
+	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+	if (!__vma_shareable_lock(vma))
+		return 1;
+
+	return down_write_trylock(&vma_lock->rw_sema);
+}
+
+void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		lockdep_assert_held(&vma_lock->rw_sema);
+	}
+}
+
+void hugetlb_vma_lock_release(struct kref *kref)
+{
+	struct hugetlb_vma_lock *vma_lock = container_of(kref,
+			struct hugetlb_vma_lock, refs);
+
+	kfree(vma_lock);
+}
+
+static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
+{
+	struct vm_area_struct *vma = vma_lock->vma;
+
+	/*
+	 * vma_lock structure may or not be released as a result of put,
+	 * it certainly will no longer be attached to vma so clear pointer.
+	 * Semaphore synchronizes access to vma_lock->vma field.
+	 */
+	vma_lock->vma = NULL;
+	vma->vm_private_data = NULL;
+	up_write(&vma_lock->rw_sema);
+	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
+}
+
+static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
+{
+	if (__vma_shareable_lock(vma)) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		__hugetlb_vma_unlock_write_put(vma_lock);
+	}
+}
+
+static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
+{
+	/*
+	 * Only present in sharable vmas.
+	 */
+	if (!vma || !__vma_shareable_lock(vma))
+		return;
+
+	if (vma->vm_private_data) {
+		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
+
+		down_write(&vma_lock->rw_sema);
+		__hugetlb_vma_unlock_write_put(vma_lock);
+	}
+}
+
+static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
+{
+	struct hugetlb_vma_lock *vma_lock;
+
+	/* Only establish in (flags) sharable vmas */
+	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
+		return;
+
+	/* Should never get here with non-NULL vm_private_data */
+	if (vma->vm_private_data)
+		return;
+
+	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
+	if (!vma_lock) {
+		/*
+		 * If we can not allocate structure, then vma can not
+		 * participate in pmd sharing. This is only a possible
+		 * performance enhancement and memory saving issue.
+		 * However, the lock is also used to synchronize page
+		 * faults with truncation. If the lock is not present,
+		 * unlikely races could leave pages in a file past i_size
+		 * until the file is removed. Warn in the unlikely case of
+		 * allocation failure.
+		 */
+		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
+		return;
+	}
+
+	kref_init(&vma_lock->refs);
+	init_rwsem(&vma_lock->rw_sema);
+	vma_lock->vma = vma;
+	vma->vm_private_data = vma_lock;
+}
+
 /* Helper that removes a struct file_region from the resv_map cache and returns
  * it for use.
  */
@@ -6613,7 +6759,8 @@ bool hugetlb_reserve_pages(struct inode *inode,
 	}
 
 	/*
-	 * vma specific semaphore used for pmd sharing synchronization
+	 * vma specific semaphore used for pmd sharing and fault/truncation
+	 * synchronization
 	 */
 	hugetlb_vma_lock_alloc(vma);
 
@@ -6869,149 +7016,6 @@ void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 	*end = ALIGN(*end, PUD_SIZE);
 }
 
-static bool __vma_shareable_flags_pmd(struct vm_area_struct *vma)
-{
-	return vma->vm_flags & (VM_MAYSHARE | VM_SHARED) &&
-		vma->vm_private_data;
-}
-
-void hugetlb_vma_lock_read(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_read(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		up_read(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_lock_write(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_write(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		up_write(&vma_lock->rw_sema);
-	}
-}
-
-int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
-{
-	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-	if (!__vma_shareable_flags_pmd(vma))
-		return 1;
-
-	return down_write_trylock(&vma_lock->rw_sema);
-}
-
-void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		lockdep_assert_held(&vma_lock->rw_sema);
-	}
-}
-
-void hugetlb_vma_lock_release(struct kref *kref)
-{
-	struct hugetlb_vma_lock *vma_lock = container_of(kref,
-			struct hugetlb_vma_lock, refs);
-
-	kfree(vma_lock);
-}
-
-static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
-{
-	struct vm_area_struct *vma = vma_lock->vma;
-
-	/*
-	 * vma_lock structure may or not be released as a result of put,
-	 * it certainly will no longer be attached to vma so clear pointer.
-	 * Semaphore synchronizes access to vma_lock->vma field.
-	 */
-	vma_lock->vma = NULL;
-	vma->vm_private_data = NULL;
-	up_write(&vma_lock->rw_sema);
-	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
-}
-
-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
-{
-	if (__vma_shareable_flags_pmd(vma)) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		__hugetlb_vma_unlock_write_put(vma_lock);
-	}
-}
-
-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
-{
-	/*
-	 * Only present in sharable vmas.
-	 */
-	if (!vma || !__vma_shareable_flags_pmd(vma))
-		return;
-
-	if (vma->vm_private_data) {
-		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
-
-		down_write(&vma_lock->rw_sema);
-		__hugetlb_vma_unlock_write_put(vma_lock);
-	}
-}
-
-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
-{
-	struct hugetlb_vma_lock *vma_lock;
-
-	/* Only establish in (flags) sharable vmas */
-	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
-		return;
-
-	/* Should never get here with non-NULL vm_private_data */
-	if (vma->vm_private_data)
-		return;
-
-	vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
-	if (!vma_lock) {
-		/*
-		 * If we can not allocate structure, then vma can not
-		 * participate in pmd sharing. This is only a possible
-		 * performance enhancement and memory saving issue.
-		 * However, the lock is also used to synchronize page
-		 * faults with truncation. If the lock is not present,
-		 * unlikely races could leave pages in a file past i_size
-		 * until the file is removed. Warn in the unlikely case of
-		 * allocation failure.
-		 */
-		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
-		return;
-	}
-
-	kref_init(&vma_lock->refs);
-	init_rwsem(&vma_lock->rw_sema);
-	vma_lock->vma = vma;
-	vma->vm_private_data = vma_lock;
-}
-
 /*
  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  * and returns the corresponding pte. While this is not necessary for the
@@ -7100,47 +7104,6 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 
 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
 
-void hugetlb_vma_lock_read(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_lock_write(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
-{
-}
-
-int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
-{
-	return 1;
-}
-
-void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
-{
-}
-
-void hugetlb_vma_lock_release(struct kref *kref)
-{
-}
-
-static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
-{
-}
-
-static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
-{
-}
-
-static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
-{
-}
-
 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 		      unsigned long addr, pud_t *pud)
 {
--
cgit v1.2.3

From 0abb964aae3da746ea2fd4301599a6fa26da58db Mon Sep 17 00:00:00 2001
From: Liam Howlett
Date: Mon, 19 Dec 2022 16:20:15 +0000
Subject: maple_tree: fix mas_spanning_rebalance() on insufficient data

Mike Rapoport contacted me off-list with a regression in running criu.
Periodic tests fail with an RCU stall during execution. Although rare,
it is possible to hit this with other uses, so this patch should be
backported to fix the regression.

This patchset adds the fix and a test case to the maple tree test
suite.
This patch (of 2):

An insufficient node was causing an out-of-bounds access on the node in
mas_leaf_max_gap(). The cause was the faulty detection of the new node
being a root node when overwriting many entries at the end of the tree.

Fix the detection of a new root and ensure there is sufficient data
prior to entering the spanning rebalance loop.

Link: https://lkml.kernel.org/r/20221219161922.2708732-1-Liam.Howlett@oracle.com
Link: https://lkml.kernel.org/r/20221219161922.2708732-2-Liam.Howlett@oracle.com
Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Liam R. Howlett
Reported-by: Mike Rapoport
Tested-by: Mike Rapoport
Cc: Andrei Vagin
Cc: Mike Rapoport
Cc: Muhammad Usama Anjum
Cc:
Signed-off-by: Andrew Morton
---
 lib/maple_tree.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index fe3947b80069..26e2045d3cda 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -2994,7 +2994,9 @@ static int mas_spanning_rebalance(struct ma_state *mas,
 	mast->free = &free;
 	mast->destroy = &destroy;
 	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
-	if (!(mast->orig_l->min && mast->orig_r->max == ULONG_MAX) &&
+
+	/* Check if this is not root and has sufficient data. */
+	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
 	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
 		mast_spanning_rebalance(mast);
 
--
cgit v1.2.3

From c5651b31f51584bd1199b3a552c8211a8523d6e1 Mon Sep 17 00:00:00 2001
From: Liam Howlett
Date: Mon, 19 Dec 2022 16:20:15 +0000
Subject: test_maple_tree: add test for mas_spanning_rebalance() on insufficient data

Add a test to the maple tree test suite so that the spanning rebalance
insufficient node issue does not go undetected again.

Link: https://lkml.kernel.org/r/20221219161922.2708732-3-Liam.Howlett@oracle.com
Fixes: 54a611b60590 ("Maple Tree: add new data structure")
Signed-off-by: Liam R. Howlett
Cc: Andrei Vagin
Cc: Mike Rapoport
Cc: Muhammad Usama Anjum
Cc:
Signed-off-by: Andrew Morton
---
 lib/test_maple_tree.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index f425f169ef08..497fc93ccf9e 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -2498,6 +2498,25 @@ static noinline void check_dup(struct maple_tree *mt)
 	}
 }
 
+static noinline void check_bnode_min_spanning(struct maple_tree *mt)
+{
+	int i = 50;
+	MA_STATE(mas, mt, 0, 0);
+
+	mt_set_non_kernel(9999);
+	mas_lock(&mas);
+	do {
+		mas_set_range(&mas, i*10, i*10+9);
+		mas_store(&mas, check_bnode_min_spanning);
+	} while (i--);
+
+	mas_set_range(&mas, 240, 509);
+	mas_store(&mas, NULL);
+	mas_unlock(&mas);
+	mas_destroy(&mas);
+	mt_set_non_kernel(0);
+}
+
 static DEFINE_MTREE(tree);
 static int maple_tree_seed(void)
 {
@@ -2742,6 +2761,10 @@ static int maple_tree_seed(void)
 	check_dup(&tree);
 	mtree_destroy(&tree);
 
+	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
+	check_bnode_min_spanning(&tree);
+	mtree_destroy(&tree);
+
 #if defined(BENCH)
 skip:
 #endif
--
cgit v1.2.3

From e96b95c2b7a63a454b6498e2df67aac14d046d13 Mon Sep 17 00:00:00 2001
From: Rickard x Andersson
Date: Tue, 20 Dec 2022 11:23:18 +0100
Subject: gcov: add support for checksum field

In GCC version 12.1 a checksum field was added.

This patch fixes a kernel crash occurring during boot when using
gcov-kernel with GCC version 12.2. The crash occurred on a system
running on i.MX6SX.
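To illustrate why the missing field is fatal, the sketch below compares
where the filename pointer lands with and without the GCC 12.1 checksum
member. The field lists are simplified stand-ins, not the real
structures: on 32-bit targets such as the i.MX6SX the offsets diverge,
so every member after stamp is read from the wrong place, while on
typical LP64 targets the padding in front of the pointer happens to mask
the difference.

	/* Illustrative sketch only -- simplified layouts, not the kernel's structures. */
	#include <stdio.h>
	#include <stddef.h>

	struct info_gcc12 {		/* what GCC >= 12.1 emits (simplified) */
		unsigned int version;
		struct info_gcc12 *next;
		unsigned int stamp;
		unsigned int checksum;	/* new in GCC 12.1 */
		const char *filename;
	};

	struct info_old_mirror {	/* the kernel's mirror before this patch (simplified) */
		unsigned int version;
		struct info_old_mirror *next;
		unsigned int stamp;
		const char *filename;
	};

	int main(void)
	{
		printf("filename offset: gcc12=%zu old=%zu\n",
		       offsetof(struct info_gcc12, filename),
		       offsetof(struct info_old_mirror, filename));
		return 0;
	}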
Link: https://lkml.kernel.org/r/20221220102318.3418501-1-rickaran@axis.com
Fixes: 977ef30a7d88 ("gcov: support GCC 12.1 and newer compilers")
Signed-off-by: Rickard x Andersson
Reviewed-by: Peter Oberparleiter
Tested-by: Peter Oberparleiter
Reviewed-by: Martin Liska
Cc:
Signed-off-by: Andrew Morton
---
 kernel/gcov/gcc_4_7.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index 7971e989e425..74a4ef1da9ad 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -82,6 +82,7 @@ struct gcov_fn_info {
  * @version: gcov version magic indicating the gcc version used for compilation
  * @next: list head for a singly-linked list
  * @stamp: uniquifying time stamp
+ * @checksum: unique object checksum
  * @filename: name of the associated gcov data file
  * @merge: merge functions (null for unused counter type)
  * @n_functions: number of instrumented functions
@@ -94,6 +95,10 @@ struct gcov_info {
 	unsigned int version;
 	struct gcov_info *next;
 	unsigned int stamp;
+	/* Since GCC 12.1 a checksum field is added. */
+#if (__GNUC__ >= 12)
+	unsigned int checksum;
+#endif
 	const char *filename;
 	void (*merge[GCOV_COUNTERS])(gcov_type *, unsigned int);
 	unsigned int n_functions;
--
cgit v1.2.3
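Closing illustration for the first patch in this series ("mm, mremap:
fix mremap() expanding vma with addr inside vma"): the userspace sketch
below is a hypothetical reproducer of the failing pattern, not taken
from the bug report. It maps three pages, unmaps the last one so the
remaining vma can grow in place, and then expands the mapping starting
from an address inside the vma, which is exactly the case where the old
extension_pgoff calculation goes wrong.

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		long page = sysconf(_SC_PAGESIZE);
		char *base;

		/* One anonymous vma covering three pages. */
		base = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (base == MAP_FAILED)
			return 1;

		/* Free the last page so [base, base + 2 * page) has room to grow. */
		munmap(base + 2 * page, page);

		/*
		 * Expand from the middle of the vma: addr is base + page and
		 * old_len reaches the current end.  With the buggy
		 * extension_pgoff, vma_merge() sees an incompatible pgoff and
		 * mremap() can fail with ENOMEM; with the fix it succeeds.
		 */
		if (mremap(base + page, page, 2 * page, 0) == MAP_FAILED)
			perror("mremap");

		return 0;
	}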