From 4e140f59d285c1ca1e5c81b4c13e27366865bd09 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Mon, 10 Jan 2022 23:15:27 +0000
Subject: mm/usercopy: Check kmap addresses properly

If you are copying to an address in the kmap region, you may not copy
across a page boundary, no matter what the size of the underlying
allocation. You can't kmap() a slab page because slab pages always
come from low memory.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-2-willy@infradead.org
---
 mm/usercopy.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/mm/usercopy.c b/mm/usercopy.c
index 2c235d5c2364..ff13e7708faa 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -229,12 +229,16 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (!virt_addr_valid(ptr))
 		return;
 
-	/*
-	 * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
-	 * highmem page or fallback to virt_to_page(). The following
-	 * is effectively a highmem-aware virt_to_slab().
-	 */
-	folio = page_folio(kmap_to_page((void *)ptr));
+	if (is_kmap_addr(ptr)) {
+		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
+
+		if ((unsigned long)ptr + n - 1 > page_end)
+			usercopy_abort("kmap", NULL, to_user,
+				       offset_in_page(ptr), n);
+		return;
+	}
+
+	folio = virt_to_folio(ptr);
 
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
--
cgit v1.2.3

From 0aef499f3172a60222ae7460d61b364c134d6e1a Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Mon, 10 Jan 2022 23:15:28 +0000
Subject: mm/usercopy: Detect vmalloc overruns

If you have a vmalloc() allocation, or an address from calling vmap(),
you cannot overrun the vm_area which describes it, regardless of the
size of the underlying allocation. This probably doesn't do much for
security because vmalloc comes with guard pages these days, but it
prevents usercopy aborts when copying to a vmap() of smaller pages.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-3-willy@infradead.org
---
 mm/usercopy.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/mm/usercopy.c b/mm/usercopy.c
index ff13e7708faa..e1e856dca124 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/thread_info.h>
+#include <linux/vmalloc.h>
 #include <linux/atomic.h>
 #include <linux/jump_label.h>
 #include <asm/sections.h>
@@ -238,6 +239,21 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		return;
 	}
 
+	if (is_vmalloc_addr(ptr)) {
+		struct vm_struct *area = find_vm_area(ptr);
+		unsigned long offset;
+
+		if (!area) {
+			usercopy_abort("vmalloc", "no area", to_user, 0, n);
+			return;
+		}
+
+		offset = ptr - area->addr;
+		if (offset + n > get_vm_area_size(area))
+			usercopy_abort("vmalloc", NULL, to_user, offset, n);
+		return;
+	}
+
 	folio = virt_to_folio(ptr);
 
 	if (folio_test_slab(folio)) {
--
cgit v1.2.3
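The bit trick in the kmap branch above is worth unpacking: ORing (PAGE_SIZE - 1)
into an address yields the address of the last byte of the page containing it,
so the copy is rejected exactly when its own last byte, ptr + n - 1, lands past
that point. The following is a minimal userspace model of just that arithmetic,
a sketch assuming 4 KiB pages, with illustrative names rather than kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL /* illustrative constant, not the kernel macro */

    /* Model of the kmap-region check: true if [ptr, ptr + n) stays within
     * a single page. Assumes n >= 1, as on the kernel path (zero-length
     * copies are filtered out earlier). */
    static bool fits_in_one_page(unsigned long ptr, unsigned long n)
    {
        unsigned long page_end = ptr | (PAGE_SIZE - 1);

        return ptr + n - 1 <= page_end;
    }

    int main(void)
    {
        printf("%d\n", fits_in_one_page(0x2ff0, 16)); /* 1: last byte is 0x2fff */
        printf("%d\n", fits_in_one_page(0x2ff0, 17)); /* 0: spills into 0x3000 */
        return 0;
    }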
From ab502103ae3ce4c0fc393e598455efede3e523c9 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Mon, 10 Jan 2022 23:15:29 +0000
Subject: mm/usercopy: Detect large folio overruns

Move the compound page overrun detection out of
CONFIG_HARDENED_USERCOPY_PAGESPAN and convert it to use folios so it's
enabled for more people.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-4-willy@infradead.org
---
 mm/usercopy.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/mm/usercopy.c b/mm/usercopy.c
index e1e856dca124..9458c2b24b02 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -164,7 +164,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 {
 #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
 	const void *end = ptr + n - 1;
-	struct page *endpage;
 	bool is_reserved, is_cma;
 
 	/*
@@ -195,11 +194,6 @@ static inline void check_page_span(const void *ptr, unsigned long n,
 		     ((unsigned long)end & (unsigned long)PAGE_MASK)))
 		return;
 
-	/* Allow if fully inside the same compound (__GFP_COMP) page. */
-	endpage = virt_to_head_page(end);
-	if (likely(endpage == page))
-		return;
-
 	/*
 	 * Reject if range is entirely either Reserved (i.e. special or
 	 * device memory), or CMA. Otherwise, reject since the object spans
@@ -259,6 +253,10 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 	if (folio_test_slab(folio)) {
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
+	} else if (folio_test_large(folio)) {
+		unsigned long offset = ptr - folio_address(folio);
+		if (offset + n > folio_size(folio))
+			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	} else {
 		/* Verify object does not incorrectly span multiple pages. */
 		check_page_span(ptr, n, folio_page(folio, 0), to_user);
--
cgit v1.2.3
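The vmalloc and large-folio branches apply the same pattern: translate ptr into
an offset within the containing object, then reject the copy when offset + n
runs past the object's size. A userspace model of that pattern, with made-up
addresses and an invented helper name:

    #include <stdbool.h>
    #include <stdio.h>

    /* base/size describe the containing object (the vm_struct or folio in
     * the kernel); true if copying n bytes starting at ptr would overrun it. */
    static bool overruns_object(unsigned long base, unsigned long size,
                                unsigned long ptr, unsigned long n)
    {
        unsigned long offset = ptr - base;

        return offset + n > size;
    }

    int main(void)
    {
        /* A 2 MiB object at 0x200000: a 4096-byte copy starting on its
         * final page fits exactly... */
        printf("%d\n", overruns_object(0x200000, 0x200000, 0x3ff000, 4096)); /* 0 */
        /* ...but one byte more would spill out of the object. */
        printf("%d\n", overruns_object(0x200000, 0x200000, 0x3ff000, 4097)); /* 1 */
        return 0;
    }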
From 1109a5d907015005cdbe9eaa4fec40213e2f9010 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Date: Mon, 10 Jan 2022 23:15:30 +0000
Subject: usercopy: Remove HARDENED_USERCOPY_PAGESPAN

There isn't enough information to make this a useful check any more;
the useful parts of it were moved in earlier patches, so remove this
set of checks now.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kees Cook <keescook@chromium.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220110231530.665970-5-willy@infradead.org
---
 mm/usercopy.c    | 61 --------------------------------------------------------
 security/Kconfig | 13 +------------
 2 files changed, 1 insertion(+), 73 deletions(-)

diff --git a/mm/usercopy.c b/mm/usercopy.c
index 9458c2b24b02..ac8a093e90c1 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -158,64 +158,6 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 		usercopy_abort("null address", NULL, to_user, ptr, n);
 }
 
-/* Checks for allocs that are marked in some way as spanning multiple pages. */
-static inline void check_page_span(const void *ptr, unsigned long n,
-				   struct page *page, bool to_user)
-{
-#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
-	const void *end = ptr + n - 1;
-	bool is_reserved, is_cma;
-
-	/*
-	 * Sometimes the kernel data regions are not marked Reserved (see
-	 * check below). And sometimes [_sdata,_edata) does not cover
-	 * rodata and/or bss, so check each range explicitly.
-	 */
-
-	/* Allow reads of kernel rodata region (if not marked as Reserved). */
-	if (ptr >= (const void *)__start_rodata &&
-	    end <= (const void *)__end_rodata) {
-		if (!to_user)
-			usercopy_abort("rodata", NULL, to_user, 0, n);
-		return;
-	}
-
-	/* Allow kernel data region (if not marked as Reserved). */
-	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
-		return;
-
-	/* Allow kernel bss region (if not marked as Reserved). */
-	if (ptr >= (const void *)__bss_start &&
-	    end <= (const void *)__bss_stop)
-		return;
-
-	/* Is the object wholly within one base page? */
-	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
-		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
-		return;
-
-	/*
-	 * Reject if range is entirely either Reserved (i.e. special or
-	 * device memory), or CMA. Otherwise, reject since the object spans
-	 * several independently allocated pages.
-	 */
-	is_reserved = PageReserved(page);
-	is_cma = is_migrate_cma_page(page);
-	if (!is_reserved && !is_cma)
-		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);
-
-	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
-		page = virt_to_head_page(ptr);
-		if (is_reserved && !PageReserved(page))
-			usercopy_abort("spans Reserved and non-Reserved pages",
-				       NULL, to_user, 0, n);
-		if (is_cma && !is_migrate_cma_page(page))
-			usercopy_abort("spans CMA and non-CMA pages", NULL,
-				       to_user, 0, n);
-	}
-#endif
-}
-
 static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
@@ -257,9 +199,6 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		unsigned long offset = ptr - folio_address(folio);
 		if (offset + n > folio_size(folio))
 			usercopy_abort("page alloc", NULL, to_user, offset, n);
-	} else {
-		/* Verify object does not incorrectly span multiple pages. */
-		check_page_span(ptr, n, folio_page(folio, 0), to_user);
 	}
 }
 
diff --git a/security/Kconfig b/security/Kconfig
index 9b2c4925585a..f29e4c656983 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -160,20 +160,9 @@ config HARDENED_USERCOPY
 	  copy_from_user() functions) by rejecting memory ranges that
 	  are larger than the specified heap object, span multiple
 	  separately allocated pages, are not on the process stack,
-	  or are part of the kernel text. This kills entire classes
+	  or are part of the kernel text. This prevents entire classes
 	  of heap overflow exploits and similar kernel memory
 	  exposures.
 
-config HARDENED_USERCOPY_PAGESPAN
-	bool "Refuse to copy allocations that span multiple pages"
-	depends on HARDENED_USERCOPY
-	depends on BROKEN
-	help
-	  When a multi-page allocation is done without __GFP_COMP,
-	  hardened usercopy will reject attempts to copy it. There are,
-	  however, several cases of this in the kernel that have not all
-	  been removed. This config is intended to be used only while
-	  trying to find such users.
-
 config FORTIFY_SOURCE
 	bool "Harden common str/mem functions against buffer overflows"
 	depends on ARCH_HAS_FORTIFY_SOURCE
--
cgit v1.2.3
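Taken together, the four patches above leave check_heap_object() reading
roughly as follows. This is reconstructed from the hunks in this series rather
than copied from the tree, so treat it as a sketch. Note that virt_addr_valid()
still runs first, which is precisely the ordering bug the next patch fixes:

    static inline void check_heap_object(const void *ptr, unsigned long n,
                                         bool to_user)
    {
        struct folio *folio;

        /* False for kmap and vmalloc addresses, so at this point in the
         * series the two branches below can never be reached. */
        if (!virt_addr_valid(ptr))
            return;

        if (is_kmap_addr(ptr)) {
            unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);

            if ((unsigned long)ptr + n - 1 > page_end)
                usercopy_abort("kmap", NULL, to_user,
                               offset_in_page(ptr), n);
            return;
        }

        if (is_vmalloc_addr(ptr)) {
            struct vm_struct *area = find_vm_area(ptr);
            unsigned long offset;

            if (!area) {
                usercopy_abort("vmalloc", "no area", to_user, 0, n);
                return;
            }

            offset = ptr - area->addr;
            if (offset + n > get_vm_area_size(area))
                usercopy_abort("vmalloc", NULL, to_user, offset, n);
            return;
        }

        folio = virt_to_folio(ptr);

        if (folio_test_slab(folio)) {
            /* Check slab allocator for flags and size. */
            __check_heap_object(ptr, n, folio_slab(folio), to_user);
        } else if (folio_test_large(folio)) {
            unsigned long offset = ptr - folio_address(folio);
            if (offset + n > folio_size(folio))
                usercopy_abort("page alloc", NULL, to_user, offset, n);
        }
    }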
From a5f4d9df1f7beaaebbaa5943ceb789c34f10b8d5 Mon Sep 17 00:00:00 2001
From: Yuanzheng Song <songyuanzheng@huawei.com>
Date: Thu, 5 May 2022 07:10:37 +0000
Subject: mm: usercopy: move the virt_addr_valid() below the is_vmalloc_addr()

The is_kmap_addr() and is_vmalloc_addr() checks in check_heap_object()
can never trigger, because virt_addr_valid() is false for the kmap and
vmalloc regions, so the function returns before reaching them. Move the
virt_addr_valid() check below the is_vmalloc_addr() check.

Signed-off-by: Yuanzheng Song <songyuanzheng@huawei.com>
Fixes: 4e140f59d285 ("mm/usercopy: Check kmap addresses properly")
Fixes: 0aef499f3172 ("mm/usercopy: Detect vmalloc overruns")
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220505071037.4121100-1-songyuanzheng@huawei.com
---
 mm/usercopy.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/usercopy.c b/mm/usercopy.c
index ac8a093e90c1..baeacc735b83 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -163,9 +163,6 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 {
 	struct folio *folio;
 
-	if (!virt_addr_valid(ptr))
-		return;
-
 	if (is_kmap_addr(ptr)) {
 		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
 
@@ -190,6 +187,9 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		return;
 	}
 
+	if (!virt_addr_valid(ptr))
+		return;
+
 	folio = virt_to_folio(ptr);
 
 	if (folio_test_slab(folio)) {
--
cgit v1.2.3
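The control-flow mistake fixed here is easy to model in miniature: a validity
filter that is false for exactly the regions the later branches handle must
run after those branches, or they become dead code. A self-contained userspace
sketch with invented region boundaries, only the control-flow shape mirrors
check_heap_object():

    #include <stdio.h>

    /* Invented layout: "lowmem" is directly mapped; "kmap" sits above it.
     * addr_valid() plays the role of virt_addr_valid(), which is false for
     * kmap and vmalloc addresses. */
    #define LOWMEM_END 0x40000000UL
    #define KMAP_END   0x50000000UL

    static int addr_valid(unsigned long p) { return p < LOWMEM_END; }
    static int is_kmap(unsigned long p)
    {
        return p >= LOWMEM_END && p < KMAP_END;
    }

    /* Buggy order: the early return fires before the kmap branch runs. */
    static const char *classify_buggy(unsigned long p)
    {
        if (!addr_valid(p))
            return "unchecked";
        if (is_kmap(p))
            return "kmap-checked";
        return "lowmem-checked";
    }

    /* Fixed order: region-specific branches first, validity filter last. */
    static const char *classify_fixed(unsigned long p)
    {
        if (is_kmap(p))
            return "kmap-checked";
        if (!addr_valid(p))
            return "unchecked";
        return "lowmem-checked";
    }

    int main(void)
    {
        unsigned long kmap_ptr = 0x48000000UL;

        printf("buggy: %s\n", classify_buggy(kmap_ptr)); /* unchecked */
        printf("fixed: %s\n", classify_fixed(kmap_ptr)); /* kmap-checked */
        return 0;
    }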