author     Linus Torvalds <torvalds@linux-foundation.org>   2023-04-30 21:43:31 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-04-30 21:43:31 +0300
commit     10de638d8ea57ebab4231ea077bed01d9bade775 (patch)
tree       f9de1b131e1a94c1cbe051e55cda1ba9b9418ee9 /arch/s390/mm
parent     d55571c0084465f1f7e1e29f22bd910d366a6e1d (diff)
parent     2a405f6bb3a5b2baaa74dfc5aaa0e1b99145bd1b (diff)
Merge tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull s390 updates from Vasily Gorbik:

 - Add support for stackleak feature. Also allow specifying
   architecture-specific stackleak poison function to enable faster
   implementation. On s390, the mvc-based implementation helps decrease
   typical overhead from a factor of 3 to just 25%

 - Convert all assembler files to use SYM* style macros, deprecating the
   ENTRY() macro and other annotations. Select ARCH_USE_SYM_ANNOTATIONS

 - Improve KASLR to also randomize module and special amode31 code base
   load addresses

 - Rework decompressor memory tracking to support memory holes and
   improve error handling

 - Add support for protected virtualization AP binding

 - Add support for set_direct_map() calls

 - Implement set_memory_rox() and noexec module_alloc()

 - Remove obsolete overriding of mem*() functions for KASAN

 - Rework kexec/kdump to avoid using nodat_stack to call purgatory

 - Convert the rest of the s390 code to use flexible-array member
   instead of a zero-length array

 - Clean up uaccess inline asm

 - Enable ARCH_HAS_MEMBARRIER_SYNC_CORE

 - Convert to using CONFIG_FUNCTION_ALIGNMENT and enable
   DEBUG_FORCE_FUNCTION_ALIGN_64B

 - Resolve last_break in userspace fault reports

 - Simplify one-level sysctl registration

 - Clean up branch prediction handling

 - Rework CPU counter facility to retrieve available counter sets just
   once

 - Other various small fixes and improvements all over the code

* tag 's390-6.4-1' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (118 commits)
  s390/stackleak: provide fast __stackleak_poison() implementation
  stackleak: allow to specify arch specific stackleak poison function
  s390: select ARCH_USE_SYM_ANNOTATIONS
  s390/mm: use VM_FLUSH_RESET_PERMS in module_alloc()
  s390: wire up memfd_secret system call
  s390/mm: enable ARCH_HAS_SET_DIRECT_MAP
  s390/mm: use BIT macro to generate SET_MEMORY bit masks
  s390/relocate_kernel: adjust indentation
  s390/relocate_kernel: use SYM* macros instead of ENTRY(), etc.
  s390/entry: use SYM* macros instead of ENTRY(), etc.
  s390/purgatory: use SYM* macros instead of ENTRY(), etc.
  s390/kprobes: use SYM* macros instead of ENTRY(), etc.
  s390/reipl: use SYM* macros instead of ENTRY(), etc.
  s390/head64: use SYM* macros instead of ENTRY(), etc.
  s390/earlypgm: use SYM* macros instead of ENTRY(), etc.
  s390/mcount: use SYM* macros instead of ENTRY(), etc.
  s390/crc32le: use SYM* macros instead of ENTRY(), etc.
  s390/crc32be: use SYM* macros instead of ENTRY(), etc.
  s390/crypto,chacha: use SYM* macros instead of ENTRY(), etc.
  s390/amode31: use SYM* macros instead of ENTRY(), etc.
  ...
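[Editor's note: the stackleak item above is the headline performance change. As a rough illustration only (names and layout here are hypothetical, not the kernel's exact code), the generic poison path writes the pattern one machine word at a time, which an architecture can now override with a bulk primitive such as s390's mvc:

	/* Illustrative generic word-at-a-time poison loop; an architecture
	 * can now supply a faster replacement (s390 uses an mvc-based one).
	 * This sketch is not the exact kernel implementation. */
	static inline void stackleak_poison_sketch(unsigned long low,
						   unsigned long high,
						   unsigned long poison)
	{
		while (low < high) {
			*(unsigned long *)low = poison;
			low += sizeof(unsigned long);
		}
	}
]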
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/Makefile     |   3
-rw-r--r--  arch/s390/mm/cmm.c        |  12
-rw-r--r--  arch/s390/mm/init.c       |   5
-rw-r--r--  arch/s390/mm/kasan_init.c | 301
-rw-r--r--  arch/s390/mm/pageattr.c   |  94
-rw-r--r--  arch/s390/mm/pgalloc.c    |  20
-rw-r--r--  arch/s390/mm/vmem.c       |  35
7 files changed, 111 insertions, 359 deletions
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index 57e4f3a24829..d90db06a8af5 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -10,6 +10,3 @@ obj-$(CONFIG_CMM) += cmm.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_PTDUMP_CORE) += dump_pagetables.o
obj-$(CONFIG_PGSTE) += gmap.o
-
-KASAN_SANITIZE_kasan_init.o := n
-obj-$(CONFIG_KASAN) += kasan_init.o
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 9141ed4c52e9..5300c6867d5e 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -335,16 +335,6 @@ static struct ctl_table cmm_table[] = {
{ }
};
-static struct ctl_table cmm_dir_table[] = {
- {
- .procname = "vm",
- .maxlen = 0,
- .mode = 0555,
- .child = cmm_table,
- },
- { }
-};
-
#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
static void cmm_smsg_target(const char *from, char *msg)
@@ -389,7 +379,7 @@ static int __init cmm_init(void)
{
int rc = -ENOMEM;
- cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
+ cmm_sysctl_header = register_sysctl("vm", cmm_table);
if (!cmm_sysctl_header)
goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
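[Editor's note: for context on the conversion above: register_sysctl_table() needed a hand-built directory table whose .child pointer linked to the leaf entries, while register_sysctl() takes the directory path as a string and registers the leaf table directly. A minimal sketch of the new one-level pattern, with a hypothetical table and procname:

	#include <linux/sysctl.h>

	static int example_val;

	/* Leaf entries only; no wrapper directory table with .child. */
	static struct ctl_table example_table[] = {
		{
			.procname	= "cmm_example",	/* hypothetical */
			.data		= &example_val,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static struct ctl_table_header *example_header;

	static int __init example_sysctl_init(void)
	{
		/* Creates /proc/sys/vm/cmm_example directly under "vm". */
		example_header = register_sysctl("vm", example_table);
		return example_header ? 0 : -ENOMEM;
	}
]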
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 144447d5cb4c..8d94e29adcdb 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -176,9 +176,8 @@ void __init mem_init(void)
void free_initmem(void)
{
- __set_memory((unsigned long)_sinittext,
- (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
- SET_MEMORY_RW | SET_MEMORY_NX);
+ set_memory_rwnx((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
free_initmem_default(POISON_FREE_INITMEM);
}
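[Editor's note: set_memory_rwnx() is one of the new convenience wrappers this series introduces. Its real definition lives in arch/s390/include/asm/set_memory.h, not in this diff; presumably it boils down to the following, which is byte-for-byte equivalent to the removed open-coded call:

	/* Assumed shape of the wrapper used above (sketch, not the
	 * verbatim header definition). */
	static inline int set_memory_rwnx(unsigned long addr, int numpages)
	{
		return __set_memory(addr, numpages, SET_MEMORY_RW | SET_MEMORY_NX);
	}
]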
diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
deleted file mode 100644
index ef89a5f26853..000000000000
--- a/arch/s390/mm/kasan_init.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/kasan.h>
-#include <linux/sched/task.h>
-#include <linux/pgtable.h>
-#include <asm/pgalloc.h>
-#include <asm/kasan.h>
-#include <asm/mem_detect.h>
-#include <asm/processor.h>
-#include <asm/sclp.h>
-#include <asm/facility.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/uv.h>
-
-static unsigned long segment_pos __initdata;
-static unsigned long segment_low __initdata;
-static bool has_edat __initdata;
-static bool has_nx __initdata;
-
-#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
-
-static void __init kasan_early_panic(const char *reason)
-{
- sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
- sclp_early_printk(reason);
- disabled_wait();
-}
-
-static void * __init kasan_early_alloc_segment(void)
-{
- segment_pos -= _SEGMENT_SIZE;
-
- if (segment_pos < segment_low)
- kasan_early_panic("out of memory during initialisation\n");
-
- return __va(segment_pos);
-}
-
-static void * __init kasan_early_alloc_pages(unsigned int order)
-{
- pgalloc_pos -= (PAGE_SIZE << order);
-
- if (pgalloc_pos < pgalloc_low)
- kasan_early_panic("out of memory during initialisation\n");
-
- return __va(pgalloc_pos);
-}
-
-static void * __init kasan_early_crst_alloc(unsigned long val)
-{
- unsigned long *table;
-
- table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
- if (table)
- crst_table_init(table, val);
- return table;
-}
-
-static pte_t * __init kasan_early_pte_alloc(void)
-{
- static void *pte_leftover;
- pte_t *pte;
-
- BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);
-
- if (!pte_leftover) {
- pte_leftover = kasan_early_alloc_pages(0);
- pte = pte_leftover + _PAGE_TABLE_SIZE;
- } else {
- pte = pte_leftover;
- pte_leftover = NULL;
- }
- memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
- return pte;
-}
-
-enum populate_mode {
- POPULATE_MAP,
- POPULATE_ZERO_SHADOW,
- POPULATE_SHALLOW
-};
-
-static inline pgprot_t pgprot_clear_bit(pgprot_t pgprot, unsigned long bit)
-{
- return __pgprot(pgprot_val(pgprot) & ~bit);
-}
-
-static void __init kasan_early_pgtable_populate(unsigned long address,
- unsigned long end,
- enum populate_mode mode)
-{
- pgprot_t pgt_prot_zero = PAGE_KERNEL_RO;
- pgprot_t pgt_prot = PAGE_KERNEL;
- pgprot_t sgt_prot = SEGMENT_KERNEL;
- pgd_t *pg_dir;
- p4d_t *p4_dir;
- pud_t *pu_dir;
- pmd_t *pm_dir;
- pte_t *pt_dir;
- pmd_t pmd;
- pte_t pte;
-
- if (!has_nx) {
- pgt_prot_zero = pgprot_clear_bit(pgt_prot_zero, _PAGE_NOEXEC);
- pgt_prot = pgprot_clear_bit(pgt_prot, _PAGE_NOEXEC);
- sgt_prot = pgprot_clear_bit(sgt_prot, _SEGMENT_ENTRY_NOEXEC);
- }
-
- while (address < end) {
- pg_dir = pgd_offset_k(address);
- if (pgd_none(*pg_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, PGDIR_SIZE) &&
- end - address >= PGDIR_SIZE) {
- pgd_populate(&init_mm, pg_dir,
- kasan_early_shadow_p4d);
- address = (address + PGDIR_SIZE) & PGDIR_MASK;
- continue;
- }
- p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
- pgd_populate(&init_mm, pg_dir, p4_dir);
- }
-
- if (mode == POPULATE_SHALLOW) {
- address = (address + P4D_SIZE) & P4D_MASK;
- continue;
- }
-
- p4_dir = p4d_offset(pg_dir, address);
- if (p4d_none(*p4_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, P4D_SIZE) &&
- end - address >= P4D_SIZE) {
- p4d_populate(&init_mm, p4_dir,
- kasan_early_shadow_pud);
- address = (address + P4D_SIZE) & P4D_MASK;
- continue;
- }
- pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
- p4d_populate(&init_mm, p4_dir, pu_dir);
- }
-
- pu_dir = pud_offset(p4_dir, address);
- if (pud_none(*pu_dir)) {
- if (mode == POPULATE_ZERO_SHADOW &&
- IS_ALIGNED(address, PUD_SIZE) &&
- end - address >= PUD_SIZE) {
- pud_populate(&init_mm, pu_dir,
- kasan_early_shadow_pmd);
- address = (address + PUD_SIZE) & PUD_MASK;
- continue;
- }
- pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
- pud_populate(&init_mm, pu_dir, pm_dir);
- }
-
- pm_dir = pmd_offset(pu_dir, address);
- if (pmd_none(*pm_dir)) {
- if (IS_ALIGNED(address, PMD_SIZE) &&
- end - address >= PMD_SIZE) {
- if (mode == POPULATE_ZERO_SHADOW) {
- pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- } else if (has_edat) {
- void *page = kasan_early_alloc_segment();
-
- memset(page, 0, _SEGMENT_SIZE);
- pmd = __pmd(__pa(page));
- pmd = set_pmd_bit(pmd, sgt_prot);
- set_pmd(pm_dir, pmd);
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
- }
- pt_dir = kasan_early_pte_alloc();
- pmd_populate(&init_mm, pm_dir, pt_dir);
- } else if (pmd_large(*pm_dir)) {
- address = (address + PMD_SIZE) & PMD_MASK;
- continue;
- }
-
- pt_dir = pte_offset_kernel(pm_dir, address);
- if (pte_none(*pt_dir)) {
- void *page;
-
- switch (mode) {
- case POPULATE_MAP:
- page = kasan_early_alloc_pages(0);
- memset(page, 0, PAGE_SIZE);
- pte = __pte(__pa(page));
- pte = set_pte_bit(pte, pgt_prot);
- set_pte(pt_dir, pte);
- break;
- case POPULATE_ZERO_SHADOW:
- page = kasan_early_shadow_page;
- pte = __pte(__pa(page));
- pte = set_pte_bit(pte, pgt_prot_zero);
- set_pte(pt_dir, pte);
- break;
- case POPULATE_SHALLOW:
- /* should never happen */
- break;
- }
- }
- address += PAGE_SIZE;
- }
-}
-
-static void __init kasan_early_detect_facilities(void)
-{
- if (test_facility(8)) {
- has_edat = true;
- __ctl_set_bit(0, 23);
- }
- if (!noexec_disabled && test_facility(130)) {
- has_nx = true;
- __ctl_set_bit(0, 20);
- }
-}
-
-void __init kasan_early_init(void)
-{
- pte_t pte_z = __pte(__pa(kasan_early_shadow_page) | pgprot_val(PAGE_KERNEL_RO));
- pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
- pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
- p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);
- unsigned long untracked_end = MODULES_VADDR;
- unsigned long shadow_alloc_size;
- unsigned long start, end;
- int i;
-
- kasan_early_detect_facilities();
- if (!has_nx)
- pte_z = clear_pte_bit(pte_z, __pgprot(_PAGE_NOEXEC));
-
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
- BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
-
- /* init kasan zero shadow */
- crst_table_init((unsigned long *)kasan_early_shadow_p4d, p4d_val(p4d_z));
- crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
- crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
- memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
-
- if (has_edat) {
- shadow_alloc_size = get_mem_detect_usable_total() >> KASAN_SHADOW_SCALE_SHIFT;
- segment_pos = round_down(pgalloc_pos, _SEGMENT_SIZE);
- segment_low = segment_pos - shadow_alloc_size;
- segment_low = round_down(segment_low, _SEGMENT_SIZE);
- pgalloc_pos = segment_low;
- }
- /*
- * Current memory layout:
- * +- 0 -------------+ +- shadow start -+
- * |1:1 ident mapping| /|1/8 of ident map|
- * | | / | |
- * +-end of ident map+ / +----------------+
- * | ... gap ... | / | kasan |
- * | | / | zero page |
- * +- vmalloc area -+ / | mapping |
- * | vmalloc_size | / | (untracked) |
- * +- modules vaddr -+ / +----------------+
- * | 2Gb |/ | unmapped | allocated per module
- * +- shadow start -+ +----------------+
- * | 1/8 addr space | | zero pg mapping| (untracked)
- * +- shadow end ----+---------+- shadow end ---+
- *
- * Current memory layout (KASAN_VMALLOC):
- * +- 0 -------------+ +- shadow start -+
- * |1:1 ident mapping| /|1/8 of ident map|
- * | | / | |
- * +-end of ident map+ / +----------------+
- * | ... gap ... | / | kasan zero page| (untracked)
- * | | / | mapping |
- * +- vmalloc area -+ / +----------------+
- * | vmalloc_size | / |shallow populate|
- * +- modules vaddr -+ / +----------------+
- * | 2Gb |/ |shallow populate|
- * +- shadow start -+ +----------------+
- * | 1/8 addr space | | zero pg mapping| (untracked)
- * +- shadow end ----+---------+- shadow end ---+
- */
- /* populate kasan shadow (for identity mapping and zero page mapping) */
- for_each_mem_detect_usable_block(i, &start, &end)
- kasan_early_pgtable_populate(__sha(start), __sha(end), POPULATE_MAP);
- if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
- untracked_end = VMALLOC_START;
- /* shallowly populate kasan shadow for vmalloc and modules */
- kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
- POPULATE_SHALLOW);
- }
- /* populate kasan shadow for untracked memory */
- kasan_early_pgtable_populate(__sha(ident_map_size), __sha(untracked_end),
- POPULATE_ZERO_SHADOW);
- kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
- POPULATE_ZERO_SHADOW);
- /* enable kasan */
- init_task.kasan_depth = 0;
- sclp_early_printk("KernelAddressSanitizer initialized\n");
-}
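[Editor's note: the deletion above removes the kernel-side early KASAN setup; per the merge description, shadow setup now happens as part of the reworked decompressor memory handling. The __sha() helper the file used is the standard generic-KASAN address translation, one shadow byte per 8 bytes of kernel address space, roughly:

	/* Sketch of kasan_mem_to_shadow(); KASAN_SHADOW_SCALE_SHIFT is 3
	 * for generic KASAN, so each 8-byte granule maps to one shadow
	 * byte at a fixed offset. */
	static inline void *kasan_mem_to_shadow_sketch(const void *addr)
	{
		return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
				+ KASAN_SHADOW_OFFSET);
	}
]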
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 85195c18b2e8..5ba3bd8a7b12 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -4,6 +4,7 @@
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
@@ -41,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end)
}
#ifdef CONFIG_PROC_FS
-atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
+atomic_long_t __bootdata_preserved(direct_pages_count[PG_DIRECT_MAP_MAX]);
void arch_report_meminfo(struct seq_file *m)
{
@@ -101,6 +102,14 @@ static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
new = set_pte_bit(new, __pgprot(_PAGE_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+ if (flags & SET_MEMORY_INV) {
+ new = set_pte_bit(new, __pgprot(_PAGE_INVALID));
+ } else if (flags & SET_MEMORY_DEF) {
+ new = __pte(pte_val(new) & PAGE_MASK);
+ new = set_pte_bit(new, PAGE_KERNEL);
+ if (!MACHINE_HAS_NX)
+ new = clear_pte_bit(new, __pgprot(_PAGE_NOEXEC));
+ }
pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
ptep++;
addr += PAGE_SIZE;
@@ -151,6 +160,14 @@ static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+ if (flags & SET_MEMORY_INV) {
+ new = set_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_INVALID));
+ } else if (flags & SET_MEMORY_DEF) {
+ new = __pmd(pmd_val(new) & PMD_MASK);
+ new = set_pmd_bit(new, SEGMENT_KERNEL);
+ if (!MACHINE_HAS_NX)
+ new = clear_pmd_bit(new, __pgprot(_SEGMENT_ENTRY_NOEXEC));
+ }
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}
@@ -232,6 +249,14 @@ static void modify_pud_page(pud_t *pudp, unsigned long addr,
new = set_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
else if (flags & SET_MEMORY_X)
new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+ if (flags & SET_MEMORY_INV) {
+ new = set_pud_bit(new, __pgprot(_REGION_ENTRY_INVALID));
+ } else if (flags & SET_MEMORY_DEF) {
+ new = __pud(pud_val(new) & PUD_MASK);
+ new = set_pud_bit(new, REGION3_KERNEL);
+ if (!MACHINE_HAS_NX)
+ new = clear_pud_bit(new, __pgprot(_REGION_ENTRY_NOEXEC));
+ }
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}
@@ -298,11 +323,6 @@ static int change_page_attr(unsigned long addr, unsigned long end,
int rc = -EINVAL;
pgd_t *pgdp;
- if (addr == end)
- return 0;
- if (end >= MODULES_END)
- return -EINVAL;
- mutex_lock(&cpa_mutex);
pgdp = pgd_offset_k(addr);
do {
if (pgd_none(*pgdp))
@@ -313,18 +333,76 @@ static int change_page_attr(unsigned long addr, unsigned long end,
break;
cond_resched();
} while (pgdp++, addr = next, addr < end && !rc);
- mutex_unlock(&cpa_mutex);
+ return rc;
+}
+
+static int change_page_attr_alias(unsigned long addr, unsigned long end,
+ unsigned long flags)
+{
+ unsigned long alias, offset, va_start, va_end;
+ struct vm_struct *area;
+ int rc = 0;
+
+ /*
+ * Changes to read-only permissions on kernel VA mappings are also
+ * applied to the kernel direct mapping. Execute permissions are
+ * intentionally not transferred to keep all allocated pages within
+ * the direct mapping non-executable.
+ */
+ flags &= SET_MEMORY_RO | SET_MEMORY_RW;
+ if (!flags)
+ return 0;
+ area = NULL;
+ while (addr < end) {
+ if (!area)
+ area = find_vm_area((void *)addr);
+ if (!area || !(area->flags & VM_ALLOC))
+ return 0;
+ va_start = (unsigned long)area->addr;
+ va_end = va_start + area->nr_pages * PAGE_SIZE;
+ offset = (addr - va_start) >> PAGE_SHIFT;
+ alias = (unsigned long)page_address(area->pages[offset]);
+ rc = change_page_attr(alias, alias + PAGE_SIZE, flags);
+ if (rc)
+ break;
+ addr += PAGE_SIZE;
+ if (addr >= va_end)
+ area = NULL;
+ }
return rc;
}
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
+ unsigned long end;
+ int rc;
+
if (!MACHINE_HAS_NX)
flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
if (!flags)
return 0;
+ if (!numpages)
+ return 0;
addr &= PAGE_MASK;
- return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
+ end = addr + numpages * PAGE_SIZE;
+ mutex_lock(&cpa_mutex);
+ rc = change_page_attr(addr, end, flags);
+ if (rc)
+ goto out;
+ rc = change_page_attr_alias(addr, end, flags);
+out:
+ mutex_unlock(&cpa_mutex);
+ return rc;
+}
+
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_INV);
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ return __set_memory((unsigned long)page_to_virt(page), 1, SET_MEMORY_DEF);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
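[Editor's note: the two set_direct_map_*() entry points added above back ARCH_HAS_SET_DIRECT_MAP (enabled elsewhere in this series; see the memfd_secret wiring in the shortlog). Generic users invalidate a page's direct-map entry while the page holds sensitive data and restore the default mapping on free; the *_noflush suffix means TLB flushing is the caller's job. A hedged caller-side sketch, with an illustrative function name:

	/* Hypothetical caller-side pattern, not kernel code. */
	static int hide_page_from_direct_map(struct page *page)
	{
		unsigned long addr = (unsigned long)page_to_virt(page);
		int rc;

		rc = set_direct_map_invalid_noflush(page);
		if (rc)
			return rc;
		/* The *_noflush variants leave the TLB flush to the caller. */
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
		return 0;
	}
]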
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 2de48b2c1b04..66ab68db9842 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -33,19 +33,9 @@ static struct ctl_table page_table_sysctl[] = {
{ }
};
-static struct ctl_table page_table_sysctl_dir[] = {
- {
- .procname = "vm",
- .maxlen = 0,
- .mode = 0555,
- .child = page_table_sysctl,
- },
- { }
-};
-
static int __init page_table_register_sysctl(void)
{
- return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
+ return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);
@@ -143,13 +133,7 @@ err_p4d:
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
- unsigned int old, new;
-
- do {
- old = atomic_read(v);
- new = old ^ bits;
- } while (atomic_cmpxchg(v, old, new) != old);
- return new;
+ return atomic_fetch_xor(bits, v) ^ bits;
}
#ifdef CONFIG_PGSTE
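[Editor's note: the atomic_xor_bits() rewrite above is a drop-in simplification. atomic_fetch_xor(bits, v) atomically applies the xor and returns the value v held before the operation, so xor-ing that old value with bits once more reconstructs exactly the new value the removed cmpxchg loop returned:

	/* Equivalence, spelled out:
	 *   old = atomic_fetch_xor(bits, v);   v now holds old ^ bits
	 *   old ^ bits == new                  same result as the old loop
	 */
	static inline unsigned int atomic_xor_bits_expanded(atomic_t *v,
							    unsigned int bits)
	{
		unsigned int old = atomic_fetch_xor(bits, v);

		return old ^ bits;
	}
]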
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 4113a7ffa149..5b22c6e24528 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -5,6 +5,7 @@
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
+#include <linux/kasan.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
@@ -664,6 +665,9 @@ static void __init memblock_region_swap(void *a, void *b, int size)
swap(*(struct memblock_region *)a, *(struct memblock_region *)b);
}
+#ifdef CONFIG_KASAN
+#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))
+#endif
/*
* map whole physical memory to virtual memory (identity mapping)
* we reserve enough space in the vmalloc area for vmemmap to hotplug
@@ -728,23 +732,24 @@ void __init vmem_map_init(void)
memblock_region_cmp, memblock_region_swap);
__for_each_mem_range(i, &memblock.memory, &memory_rwx,
NUMA_NO_NODE, MEMBLOCK_NONE, &base, &end, NULL) {
- __set_memory((unsigned long)__va(base),
- (end - base) >> PAGE_SHIFT,
- SET_MEMORY_RW | SET_MEMORY_NX);
+ set_memory_rwnx((unsigned long)__va(base),
+ (end - base) >> PAGE_SHIFT);
}
- __set_memory((unsigned long)_stext,
- (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory((unsigned long)_etext,
- (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
- SET_MEMORY_RO);
- __set_memory((unsigned long)_sinittext,
- (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
- __set_memory(__stext_amode31,
- (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
- SET_MEMORY_RO | SET_MEMORY_X);
+#ifdef CONFIG_KASAN
+ for_each_mem_range(i, &base, &end) {
+ set_memory_rwnx(__sha(base),
+ (__sha(end) - __sha(base)) >> PAGE_SHIFT);
+ }
+#endif
+ set_memory_rox((unsigned long)_stext,
+ (unsigned long)(_etext - _stext) >> PAGE_SHIFT);
+ set_memory_ro((unsigned long)_etext,
+ (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT);
+ set_memory_rox((unsigned long)_sinittext,
+ (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
+ set_memory_rox(__stext_amode31,
+ (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT);
/* lowcore must be executable for LPSWE */
if (static_key_enabled(&cpu_has_bear))