path: root/arch/s390/boot
author     Vasily Gorbik <gor@linux.ibm.com>     2023-02-08 20:11:25 +0300
committer  Heiko Carstens <hca@linux.ibm.com>    2023-03-20 13:02:50 +0300
commit     8c37cb7d4ffcc827a9484282691b018715a5ae1a (patch)
tree       e0c11f73e39a8a78ad3125ec34339639d62323b1 /arch/s390/boot
parent     53fcc7dbf17691d8eac382ee315970a75286dd4b (diff)
download   linux-8c37cb7d4ffcc827a9484282691b018715a5ae1a.tar.xz
s390/boot: rename mem_detect to physmem_info
In preparation for extending mem_detect with additional information like reserved ranges, rename it to the more generic physmem_info. The new naming also helps to avoid confusion by using more exact terms like "physmem online ranges", etc.

Acked-by: Heiko Carstens <hca@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Diffstat (limited to 'arch/s390/boot')
-rw-r--r--  arch/s390/boot/Makefile                                                     2
-rw-r--r--  arch/s390/boot/boot.h                                                       2
-rw-r--r--  arch/s390/boot/kaslr.c                                                     14
-rw-r--r--  arch/s390/boot/physmem_info.c (renamed from arch/s390/boot/mem_detect.c)  76
-rw-r--r--  arch/s390/boot/startup.c                                                    6
-rw-r--r--  arch/s390/boot/vmem.c                                                       6
6 files changed, 53 insertions, 53 deletions
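
For orientation, a rough sketch of the renamed data structures as they are used by the hunks below. The definitions themselves live in asm/physmem_info.h, which is outside this diffstat, so the field types and layout shown here are inferred from the usage in the patch rather than copied from the header:

struct physmem_range {
	u64 start;
	u64 end;
};

struct physmem_info {
	u32 range_count;			/* was mem_detect.count */
	u8 info_source;				/* MEM_DETECT_* constant */
	unsigned long usable;			/* set by physmem_set_usable_limit() */
	struct physmem_range online[MEM_INLINED_ENTRIES];	/* was .entries[] */
	struct physmem_range *online_extended;	/* was .entries_extended */
};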
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index cebd4ca16916..c7c81e5f9218 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -35,7 +35,7 @@ endif
CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
-obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o vmem.o
+obj-y := head.o als.o startup.o physmem_info.o ipl_parm.o ipl_report.o vmem.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o ipl_data.o machine_kexec_reloc.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 58ce701d6110..d39895d5796e 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -34,7 +34,7 @@ struct vmlinux_info {
void startup_kernel(void);
unsigned long detect_memory(unsigned long *safe_addr);
-void mem_detect_set_usable_limit(unsigned long limit);
+void physmem_set_usable_limit(unsigned long limit);
bool is_ipl_block_dump(void);
void store_ipl_parmblock(void);
unsigned long read_ipl_report(unsigned long safe_addr);
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 3e3d846400b4..22b7c5d8e94a 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -3,7 +3,7 @@
* Copyright IBM Corp. 2019
*/
#include <linux/pgtable.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
@@ -93,7 +93,7 @@ static int get_random(unsigned long limit, unsigned long *value)
/*
* To randomize kernel base address we have to consider several facts:
- * 1. physical online memory might not be continuous and have holes. mem_detect
+ * 1. physical online memory might not be continuous and have holes. physmem
* info contains list of online memory ranges we should consider.
* 2. we have several memory regions which are occupied and we should not
* overlap and destroy them. Currently safe_addr tells us the border below
@@ -108,7 +108,7 @@ static int get_random(unsigned long limit, unsigned long *value)
* (16 pages when the kernel is built with kasan enabled)
* Assumptions:
* 1. kernel size (including .bss size) and upper memory limit are page aligned.
- * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
+ * 2. physmem online region start is THREAD_SIZE aligned / end is PAGE_SIZE
* aligned (in practice memory configurations granularity on z/VM and LPAR
* is 1mb).
*
@@ -132,7 +132,7 @@ static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
unsigned long start, end, pos = 0;
int i;
- for_each_mem_detect_usable_block(i, &start, &end) {
+ for_each_physmem_usable_range(i, &start, &end) {
if (_min >= end)
continue;
if (start >= _max)
@@ -153,7 +153,7 @@ static unsigned long position_to_address(unsigned long pos, unsigned long kernel
unsigned long start, end;
int i;
- for_each_mem_detect_usable_block(i, &start, &end) {
+ for_each_physmem_usable_range(i, &start, &end) {
if (_min >= end)
continue;
if (start >= _max)
@@ -172,8 +172,8 @@ static unsigned long position_to_address(unsigned long pos, unsigned long kernel
unsigned long get_random_base(unsigned long safe_addr)
{
- unsigned long usable_total = get_mem_detect_usable_total();
- unsigned long memory_limit = get_mem_detect_end();
+ unsigned long usable_total = get_physmem_usable_total();
+ unsigned long memory_limit = get_physmem_usable_end();
unsigned long base_pos, max_pos, kernel_size;
int i;
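
The comment block above spells out the constraints on base-address randomization. Below is a simplified, free-standing illustration of the counting pass that count_valid_kernel_positions() performs over the usable ranges; the alignment constant and the example ranges are placeholders rather than the kernel's values, and the second pass (position_to_address(), mapping a random index back to an address) is left out:

#include <stdint.h>
#include <stdio.h>

#define THREAD_SIZE	0x4000ULL	/* placeholder alignment for candidate bases */

struct range { uint64_t start, end; };

/* stand-ins for the physmem online ranges */
static const struct range usable[] = {
	{ 0x00100000, 0x10000000 },
	{ 0x20000000, 0x30000000 },
};

/* count THREAD_SIZE-aligned positions in [_min, _max) that can hold the kernel */
static uint64_t count_positions(uint64_t kernel_size, uint64_t _min, uint64_t _max)
{
	uint64_t pos = 0;

	for (unsigned int i = 0; i < sizeof(usable) / sizeof(usable[0]); i++) {
		uint64_t start = usable[i].start;
		uint64_t end = usable[i].end;

		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		if (start < _min)
			start = _min;
		if (end > _max)
			end = _max;
		start = (start + THREAD_SIZE - 1) & ~(THREAD_SIZE - 1);
		if (start + kernel_size <= end)
			pos += (end - start - kernel_size) / THREAD_SIZE + 1;
	}
	return pos;
}

int main(void)
{
	/* e.g. a 64 MiB kernel, safe_addr at 16 MiB, memory limit at 512 MiB */
	printf("%llu candidate positions\n",
	       (unsigned long long)count_positions(0x4000000, 0x1000000, 0x20000000));
	return 0;
}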
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/physmem_info.c
index 35f4ba11f7fd..dc2e4d0abfab 100644
--- a/arch/s390/boot/mem_detect.c
+++ b/arch/s390/boot/physmem_info.c
@@ -5,44 +5,44 @@
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/sparsemem.h>
#include "decompressor.h"
#include "boot.h"
-struct mem_detect_info __bootdata(mem_detect);
+struct physmem_info __bootdata(physmem_info);
/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX \
- (256 * (1020 / 2) * sizeof(struct mem_detect_block))
+ (256 * (1020 / 2) * sizeof(struct physmem_range))
-static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
+static struct physmem_range *__get_physmem_range_ptr(u32 n)
{
if (n < MEM_INLINED_ENTRIES)
- return &mem_detect.entries[n];
- return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
+ return &physmem_info.online[n];
+ return &physmem_info.online_extended[n - MEM_INLINED_ENTRIES];
}
/*
- * sequential calls to add_mem_detect_block with adjacent memory areas
- * are merged together into single memory block.
+ * sequential calls to add_physmem_online_range with adjacent memory ranges
+ * are merged together into single memory range.
*/
-void add_mem_detect_block(u64 start, u64 end)
+void add_physmem_online_range(u64 start, u64 end)
{
- struct mem_detect_block *block;
+ struct physmem_range *range;
- if (mem_detect.count) {
- block = __get_mem_detect_block_ptr(mem_detect.count - 1);
- if (block->end == start) {
- block->end = end;
+ if (physmem_info.range_count) {
+ range = __get_physmem_range_ptr(physmem_info.range_count - 1);
+ if (range->end == start) {
+ range->end = end;
return;
}
}
- block = __get_mem_detect_block_ptr(mem_detect.count);
- block->start = start;
- block->end = end;
- mem_detect.count++;
+ range = __get_physmem_range_ptr(physmem_info.range_count);
+ range->start = start;
+ range->end = end;
+ physmem_info.range_count++;
}
static int __diag260(unsigned long rx1, unsigned long rx2)
@@ -95,7 +95,7 @@ static int diag260(void)
return -1;
for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
- add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
+ add_physmem_online_range(storage_extents[i].start, storage_extents[i].end + 1);
return 0;
}
@@ -148,44 +148,44 @@ unsigned long detect_memory(unsigned long *safe_addr)
unsigned long max_physmem_end = 0;
sclp_early_get_memsize(&max_physmem_end);
- mem_detect.entries_extended = (struct mem_detect_block *)ALIGN(*safe_addr, sizeof(u64));
+ physmem_info.online_extended = (struct physmem_range *)ALIGN(*safe_addr, sizeof(u64));
if (!sclp_early_read_storage_info()) {
- mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
+ physmem_info.info_source = MEM_DETECT_SCLP_STOR_INFO;
} else if (!diag260()) {
- mem_detect.info_source = MEM_DETECT_DIAG260;
- max_physmem_end = max_physmem_end ?: get_mem_detect_end();
+ physmem_info.info_source = MEM_DETECT_DIAG260;
+ max_physmem_end = max_physmem_end ?: get_physmem_usable_end();
} else if (max_physmem_end) {
- add_mem_detect_block(0, max_physmem_end);
- mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
+ add_physmem_online_range(0, max_physmem_end);
+ physmem_info.info_source = MEM_DETECT_SCLP_READ_INFO;
} else {
max_physmem_end = search_mem_end();
- add_mem_detect_block(0, max_physmem_end);
- mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
+ add_physmem_online_range(0, max_physmem_end);
+ physmem_info.info_source = MEM_DETECT_BIN_SEARCH;
}
- if (mem_detect.count > MEM_INLINED_ENTRIES) {
- *safe_addr += (mem_detect.count - MEM_INLINED_ENTRIES) *
- sizeof(struct mem_detect_block);
+ if (physmem_info.range_count > MEM_INLINED_ENTRIES) {
+ *safe_addr += (physmem_info.range_count - MEM_INLINED_ENTRIES) *
+ sizeof(struct physmem_range);
}
return max_physmem_end;
}
-void mem_detect_set_usable_limit(unsigned long limit)
+void physmem_set_usable_limit(unsigned long limit)
{
- struct mem_detect_block *block;
+ struct physmem_range *range;
int i;
/* make sure mem_detect.usable ends up within online memory block */
- for (i = 0; i < mem_detect.count; i++) {
- block = __get_mem_detect_block_ptr(i);
- if (block->start >= limit)
+ for (i = 0; i < physmem_info.range_count; i++) {
+ range = __get_physmem_range_ptr(i);
+ if (range->start >= limit)
break;
- if (block->end >= limit) {
- mem_detect.usable = limit;
+ if (range->end >= limit) {
+ physmem_info.usable = limit;
break;
}
- mem_detect.usable = block->end;
+ physmem_info.usable = range->end;
}
}
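
The merge behaviour described in the comment above (sequential calls with adjacent ranges collapse into a single entry) is easy to see in a small free-standing demo. The helper below mirrors the logic of add_physmem_online_range() from the hunk, but it is demo code, not the kernel implementation, and bounds checking is omitted:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };

static struct range online[4];
static unsigned int range_count;

/* same idea as add_physmem_online_range(): coalesce adjacent ranges */
static void add_range(uint64_t start, uint64_t end)
{
	if (range_count && online[range_count - 1].end == start) {
		online[range_count - 1].end = end;
		return;
	}
	online[range_count].start = start;
	online[range_count].end = end;
	range_count++;
}

int main(void)
{
	add_range(0x00000000, 0x10000000);	/* [0, 256M)  -> entry 0 */
	add_range(0x10000000, 0x20000000);	/* adjacent   -> merged into entry 0 */
	add_range(0x30000000, 0x40000000);	/* [768M, 1G) -> entry 1 */
	printf("range_count = %u\n", range_count);	/* prints 2 */
	return 0;
}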
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 16ee3469f744..50475bf25ecd 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -12,7 +12,7 @@
#include <asm/diag.h>
#include <asm/uv.h>
#include <asm/abs_lowcore.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include "decompressor.h"
#include "boot.h"
#include "uv.h"
@@ -139,7 +139,7 @@ static void handle_relocs(unsigned long offset)
*
* Consider the following factors:
* 1. max_physmem_end - end of physical memory online or standby.
- * Always <= end of the last online memory block (get_mem_detect_end()).
+ * Always >= end of the last online memory range (get_physmem_online_end()).
* 2. CONFIG_MAX_PHYSMEM_BITS - the maximum size of physical memory the
* kernel is able to support.
* 3. "mem=" kernel command line option which limits physical memory usage.
@@ -303,7 +303,7 @@ void startup_kernel(void)
setup_ident_map_size(max_physmem_end);
setup_vmalloc_size();
asce_limit = setup_kernel_memory_layout();
- mem_detect_set_usable_limit(ident_map_size);
+ physmem_set_usable_limit(ident_map_size);
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
random_lma = get_random_base(safe_addr);
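
The comment above lists the factors that bound the identity-mapping size. As a hedged sketch only (the real setup_ident_map_size() is not part of this hunk and also handles dump and other special cases), combining those factors could look roughly like this, with memory_limit standing in for a parsed "mem=" value and the MAX_PHYSMEM_BITS placeholder standing in for CONFIG_MAX_PHYSMEM_BITS:

#define MAX_PHYSMEM_BITS	46	/* placeholder; the real value is config-dependent */

/* illustrative only: clamp the identity map to the smallest of the limits above */
static unsigned long ident_map_limit(unsigned long max_physmem_end,
				     unsigned long memory_limit)
{
	unsigned long limit = max_physmem_end;

	if (memory_limit && memory_limit < limit)	/* "mem=" option */
		limit = memory_limit;
	if (limit > (1UL << MAX_PHYSMEM_BITS))		/* addressing capability cap */
		limit = 1UL << MAX_PHYSMEM_BITS;
	return limit;
}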
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index 4d1d0d8e99cb..b89a6893f398 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -4,7 +4,7 @@
#include <asm/pgalloc.h>
#include <asm/facility.h>
#include <asm/sections.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/abs_lowcore.h>
#include "decompressor.h"
@@ -51,7 +51,7 @@ static void pgtable_populate_init(void)
pgalloc_low = max(pgalloc_low, initrd_end);
}
- pgalloc_end = round_down(get_mem_detect_end(), PAGE_SIZE);
+ pgalloc_end = round_down(get_physmem_usable_end(), PAGE_SIZE);
pgalloc_pos = pgalloc_end;
boot_check_oom();
@@ -252,7 +252,7 @@ void setup_vmem(unsigned long asce_limit)
*/
pgtable_populate_init();
pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
- for_each_mem_detect_usable_block(i, &start, &end)
+ for_each_physmem_usable_range(i, &start, &end)
pgtable_populate(start, end, POPULATE_ONE2ONE);
pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
POPULATE_ABS_LOWCORE);