path: root/arch/s390/mm/vmem.c
author	Heiko Carstens <hca@linux.ibm.com>	2023-10-27 15:12:39 +0300
committer	Vasily Gorbik <gor@linux.ibm.com>	2023-11-06 00:34:58 +0300
commit	a51324c430db3fcf3e7d77c265491322c251a396 (patch)
tree	8287b8d7b681c66ba708982b3c96cd8b26eb5279	/arch/s390/mm/vmem.c
parent	65d37f163add1c6ead3a63788acb2f9590159f94 (diff)
download	linux-a51324c430db3fcf3e7d77c265491322c251a396.tar.xz
s390/cmma: rework no-dat handling
Rework the way physical pages are set no-dat / dat.

The old way is:
- Rely on all pages being initially marked "dat"
- Allocate page tables for the kernel mapping
- Enable dat
- Walk the whole kernel mapping and set the PG_arch_1 bit in all struct pages that belong to pages of kernel page tables
- Walk all struct pages and test and clear the PG_arch_1 bit. If the bit is not set, set the page state to no-dat
- For all subsequent page table allocations, set the page state to dat (remove the no-dat state) at allocation time

Change this rather complex logic to a simpler approach:
- Set the whole physical memory (all pages) to "no-dat"
- Explicitly set those page table pages to "dat" which are part of the kernel image (e.g. swapper_pg_dir)
- For all subsequent page table allocations, set the page state to dat (remove the no-dat state) at allocation time

As a result the code is simpler, and this also allows getting rid of one odd usage of the PG_arch_1 bit.

Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
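A minimal sketch of the new flow described above. The helper essa_set_state() is hypothetical (introduced here only for illustration) and is assumed to wrap the ESSA instruction with a page-state command; this is not the kernel's actual implementation, just the shape of the idea:

	/* Hypothetical helper: issue ESSA with the given command for one page. */
	static void essa_set_state(unsigned long addr, int cmd);

	static void cmma_mark_all_pages_no_dat(unsigned long max_pfn)
	{
		unsigned long pfn;

		/* Step 1: treat the whole physical memory as "no-dat". */
		for (pfn = 0; pfn < max_pfn; pfn++)
			essa_set_state(pfn << PAGE_SHIFT, ESSA_SET_STABLE_NODAT);
	}

	static void cmma_mark_kernel_page_tables_dat(void)
	{
		/*
		 * Step 2: page table pages that are part of the kernel image,
		 * e.g. swapper_pg_dir, are explicitly switched back to "dat".
		 */
		essa_set_state((unsigned long)swapper_pg_dir, ESSA_SET_STABLE);
	}

Step 3 is what the two hunks below implement: every page table allocated later is marked "dat" at allocation time.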
Diffstat (limited to 'arch/s390/mm/vmem.c')
-rw-r--r--	arch/s390/mm/vmem.c	|	4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b13990776fae..186a020857cf 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -50,8 +50,7 @@ void *vmem_crst_alloc(unsigned long val)
 	if (!table)
 		return NULL;
 	crst_table_init(table, val);
-	if (slab_is_available())
-		__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
+	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
 	return table;
 }
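With the rework the call no longer depends on slab_is_available(). For orientation, vmem_crst_alloc() reads roughly as follows after the patch; the allocation call at the top lies above the context shown in the hunk and is an assumption for illustration:

	void *vmem_crst_alloc(unsigned long val)
	{
		unsigned long *table;

		/* Allocation assumed for illustration; not part of the hunk above. */
		table = vmem_alloc_pages(CRST_ALLOC_ORDER);
		if (!table)
			return NULL;
		crst_table_init(table, val);
		/* New: every freshly allocated region/segment table is marked "dat". */
		__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
		return table;
	}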
@@ -67,6 +66,7 @@ pte_t __ref *vmem_pte_alloc(void)
 	if (!pte)
 		return NULL;
 	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+	__arch_set_page_dat(pte, 1);
 	return pte;
 }
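The second hunk applies the same pattern to PTE tables: the page backing a newly allocated PTE table is marked "dat" right after it is initialized. Under the same caveat (the lines above the hunk are assumed, not shown in the diff), the resulting function looks roughly like:

	pte_t __ref *vmem_pte_alloc(void)
	{
		unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
		pte_t *pte;

		/* Allocation assumed for illustration; not part of the hunk above. */
		if (slab_is_available())
			pte = (pte_t *) page_table_alloc(&init_mm);
		else
			pte = (pte_t *) memblock_alloc(size, size);
		if (!pte)
			return NULL;
		memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
		/* New: the single page holding this PTE table becomes "dat". */
		__arch_set_page_dat(pte, 1);
		return pte;
	}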