author		Linus Torvalds <torvalds@linux-foundation.org>	2023-06-26 19:31:06 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-06-26 19:31:06 +0300
commit		9d9a9bf07ed9b09af3696997f02a557b428be092 (patch)
tree		e6f360fbce0400b320342b3774644ad55c7dcc49 /arch/s390/mm/vmem.c
parent		be5b52dc144415a88ce785575ec9b6d989fc5ac6 (diff)
parent		ad3d770b83afffd10abf624ec80c408254343a20 (diff)
Merge tag 's390-6.4-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Alexander Gordeev:

 - Use correct type for size of memory allocated for ELF core header on
   kernel crash.

 - Fix insecure W+X mapping warning when KASAN shadow memory range is
   not aligned on page boundary.

 - Avoid allocation of short by one page KASAN shadow memory when the
   original memory range is less than (PAGE_SIZE << 3).

 - Fix virtual vs physical address confusion in physical memory
   enumerator. It is not a real issue, since virtual and physical
   addresses are currently the same.

 - Set CONFIG_NET_TC_SKB_EXT=y in s390 config files as it is required
   for offloading TC as well as bridges on switchdev capable ConnectX
   devices.

* tag 's390-6.4-4' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/defconfigs: set CONFIG_NET_TC_SKB_EXT=y
  s390/boot: fix physmem_info virtual vs physical address confusion
  s390/kasan: avoid short by one page shadow memory
  s390/kasan: fix insecure W+X mapping warning
  s390/crash: use the correct type for memory allocation
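For context on the two KASAN items: with generic KASAN, one shadow byte
tracks eight bytes of memory, so a page-aligned memory range maps to a
shadow range that is only aligned to PAGE_SIZE / 8 (512 bytes with 4K
pages), not to a page boundary. The mapping below is the generic form
from include/linux/kasan.h, quoted as a reference sketch;
KASAN_SHADOW_OFFSET is set per architecture:

	/* one shadow byte per (1 << KASAN_SHADOW_SCALE_SHIFT) == 8 bytes */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}

Because of this 1/8 scaling, the shadow of a memory range smaller than
PAGE_SIZE << 3 occupies less than a full page, which is where both the
W+X warning and the short-by-one-page shadow allocation came from.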
Diffstat (limited to 'arch/s390/mm/vmem.c')
-rw-r--r--	arch/s390/mm/vmem.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 5b22c6e24528..b9dcb4ae6c59 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -667,7 +667,15 @@ static void __init memblock_region_swap(void *a, void *b, int size)
#ifdef CONFIG_KASAN
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+
+static inline int set_memory_kasan(unsigned long start, unsigned long end)
+{
+ start = PAGE_ALIGN_DOWN(__sha(start));
+ end = PAGE_ALIGN(__sha(end));
+ return set_memory_rwnx(start, (end - start) >> PAGE_SHIFT);
+}
#endif
+
/*
* map whole physical memory to virtual memory (identity mapping)
* we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -737,10 +745,8 @@ void __init vmem_map_init(void)
}
#ifdef CONFIG_KASAN
- for_each_mem_range(i, &base, &end) {
- set_memory_rwnx(__sha(base),
- (__sha(end) - __sha(base)) >> PAGE_SHIFT);
- }
+ for_each_mem_range(i, &base, &end)
+ set_memory_kasan(base, end);
#endif
set_memory_rox((unsigned long)_stext,
(unsigned long)(_etext - _stext) >> PAGE_SHIFT);
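
A minimal user-space sketch of the arithmetic that the new
set_memory_kasan() helper fixes. The PAGE_* macros mirror the kernel's,
sha() is a simplified stand-in for __sha() with the shadow offset
omitted, and the addresses are made-up example values:

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define PAGE_SIZE		(1UL << PAGE_SHIFT)
	#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define PAGE_ALIGN_DOWN(x)	((x) & ~(PAGE_SIZE - 1))
	#define KASAN_SHADOW_SCALE_SHIFT 3	/* one shadow byte per 8 bytes */

	/* simplified __sha(): the real macro also adds KASAN_SHADOW_OFFSET */
	static unsigned long sha(unsigned long addr)
	{
		return addr >> KASAN_SHADOW_SCALE_SHIFT;
	}

	int main(void)
	{
		unsigned long base = 0x100000, end = 0x103000; /* three 4K pages */
		unsigned long s = sha(base), e = sha(end);     /* 0x600 shadow bytes */

		/* old computation: rounds down to zero pages here, leaving the
		 * shadow with its default (potentially W+X) protection */
		printf("unaligned: %lu page(s)\n", (e - s) >> PAGE_SHIFT);

		/* fixed computation, as in set_memory_kasan() above */
		s = PAGE_ALIGN_DOWN(s);
		e = PAGE_ALIGN(e);
		printf("aligned:   %lu page(s)\n", (e - s) >> PAGE_SHIFT);
		return 0;
	}

With these values the unaligned computation covers zero shadow pages
while the aligned one covers the full page: rounding the shadow start
down and the shadow end up before calling set_memory_rwnx() is exactly
the gap the patch closes.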