From e2a86800d58639b3acde7eaeb9eb393dca066e08 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Mon, 17 May 2021 21:15:15 +0300
Subject: memblock: free_unused_memmap: use pageblock units instead of MAX_ORDER

The code that frees the unused memory map rounds the start and end of the
freed holes to MAX_ORDER_NR_PAGES to preserve the continuity of the memory
map for MAX_ORDER regions.

Lots of core memory management functionality relies on the homogeneity of
the memory map within each pageblock, whose size may differ from MAX_ORDER
in certain configurations.

Although currently, for the architectures that use free_unused_memmap(),
pageblock_order and MAX_ORDER are equivalent, it is cleaner to use a
common notation throughout mm code.

Replace MAX_ORDER_NR_PAGES with pageblock_nr_pages and update the comments
to make it clearer why the alignment to pageblock boundaries is required.

Signed-off-by: Mike Rapoport
Tested-by: Tony Lindgren
---
 mm/memblock.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index afaefa8fc6ab..97fa87541b5f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1943,11 +1943,11 @@ static void __init free_unused_memmap(void)
 		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
 #else
 		/*
-		 * Align down here since the VM subsystem insists that the
-		 * memmap entries are valid from the bank start aligned to
-		 * MAX_ORDER_NR_PAGES.
+		 * Align down here since many operations in VM subsystem
+		 * presume that there are no holes in the memory map inside
+		 * a pageblock
 		 */
-		start = round_down(start, MAX_ORDER_NR_PAGES);
+		start = round_down(start, pageblock_nr_pages);
 #endif

 		/*
@@ -1958,11 +1958,11 @@
 			free_memmap(prev_end, start);

 		/*
-		 * Align up here since the VM subsystem insists that the
-		 * memmap entries are valid from the bank end aligned to
-		 * MAX_ORDER_NR_PAGES.
+		 * Align up here since many operations in VM subsystem
+		 * presume that there are no holes in the memory map inside
+		 * a pageblock
 		 */
-		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
+		prev_end = ALIGN(end, pageblock_nr_pages);
 	}

 #ifdef CONFIG_SPARSEMEM
-- cgit v1.2.3

From f921f53e089a12a192808ac4319f28727b35dc0f Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Mon, 17 May 2021 21:31:59 +0300
Subject: memblock: align freed memory map on pageblock boundaries with SPARSEMEM

When CONFIG_SPARSEMEM=y, the ranges of the memory map that are freed are
not aligned to pageblock boundaries, which breaks assumptions about the
homogeneity of the memory map throughout core mm code.

Make sure that the freed memory map is always aligned on pageblock
boundaries regardless of the memory model selection.

Signed-off-by: Mike Rapoport
Tested-by: Tony Lindgren
---
 mm/memblock.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index 97fa87541b5f..2e25d69739e0 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1941,14 +1941,13 @@ static void __init free_unused_memmap(void)
 		 * due to SPARSEMEM sections which aren't present.
 		 */
 		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
-#else
+#endif
 		/*
 		 * Align down here since many operations in VM subsystem
 		 * presume that there are no holes in the memory map inside
 		 * a pageblock
 		 */
 		start = round_down(start, pageblock_nr_pages);
-#endif

 		/*
 		 * If we had a previous bank, and there is a space
@@ -1966,8 +1965,10 @@ static void __init free_unused_memmap(void)
 	}

 #ifdef CONFIG_SPARSEMEM
-	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
+	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
+		prev_end = ALIGN(prev_end, pageblock_nr_pages);
 		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
+	}
 #endif
 }
-- cgit v1.2.3

From 023accf5cdc1e504a9b04187ec23ff156fe53d90 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Wed, 30 Jun 2021 09:12:13 +0300
Subject: memblock: ensure there is no overflow in memblock_overlaps_region()

There may be an overflow in memblock_overlaps_region() if it is called
with base and size such that base + size > PHYS_ADDR_MAX.

Make sure that memblock_overlaps_region() caps the size to prevent such
an overflow and remove the now duplicated call to memblock_cap_size()
from memblock_is_region_reserved().

Signed-off-by: Mike Rapoport
Tested-by: Tony Lindgren
---
 mm/memblock.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/memblock.c b/mm/memblock.c
index 2e25d69739e0..67e0e24f8cc9 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -182,6 +182,8 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
 {
 	unsigned long i;

+	memblock_cap_size(base, &size);
+
 	for (i = 0; i < type->cnt; i++)
 		if (memblock_addrs_overlap(base, size, type->regions[i].base,
 					   type->regions[i].size))
@@ -1794,7 +1796,6 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
  */
 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
 {
-	memblock_cap_size(base, &size);
 	return memblock_overlaps_region(&memblock.reserved, base, size);
 }
-- cgit v1.2.3
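
The overflow the last patch guards against is easy to demonstrate outside
the kernel. The following is a minimal userspace sketch, not kernel code:
the phys_addr_t typedef, the PHYS_ADDR_MAX definition and the cap_size()
helper are local stand-ins that only mirror the intent of the kernel's
memblock_cap_size().

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;           /* assumption: 64-bit physical addresses */
#define PHYS_ADDR_MAX ((phys_addr_t)-1)

/* Stand-in for memblock_cap_size(): clamp *size so that base + *size
 * cannot exceed PHYS_ADDR_MAX and therefore cannot wrap around. */
static phys_addr_t cap_size(phys_addr_t base, phys_addr_t *size)
{
	phys_addr_t max = PHYS_ADDR_MAX - base;

	if (*size > max)
		*size = max;
	return *size;
}

int main(void)
{
	phys_addr_t base = PHYS_ADDR_MAX - 0x1000;
	phys_addr_t size = 0x2000;

	/* Uncapped: base + size wraps past PHYS_ADDR_MAX and lands near 0,
	 * so an overlap test against this "end" gives bogus results. */
	printf("wrapped end: %#llx\n", (unsigned long long)(base + size));

	cap_size(base, &size);

	/* Capped: the region end is pinned at PHYS_ADDR_MAX. */
	printf("capped size: %#llx, end: %#llx\n",
	       (unsigned long long)size,
	       (unsigned long long)(base + size));
	return 0;
}

With base just below PHYS_ADDR_MAX, the uncapped sum wraps to 0xfff, i.e.
the region appears to end near address zero; after capping, the end stays
at PHYS_ADDR_MAX, which is why moving the cap into
memblock_overlaps_region() itself protects every caller.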