path: root/arch
author    Mike Rapoport <rppt@linux.ibm.com>  2021-12-13 12:41:31 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2021-12-17 12:14:42 +0300
commit    b4b54c7ba149ffa2dcb11b2a84ebf20189ff7f89 (patch)
tree      59f9922f2913e5227faf248467d040e17006cd4d /arch
parent    b6a1cbd187fc7addddcb9c669f7362dec4d54595 (diff)
download  linux-b4b54c7ba149ffa2dcb11b2a84ebf20189ff7f89.tar.xz
memblock: free_unused_memmap: use pageblock units instead of MAX_ORDER
[ Upstream commit e2a86800d58639b3acde7eaeb9eb393dca066e08 ]

The code that frees the unused memory map rounds the start and end of the
holes that are freed to MAX_ORDER_NR_PAGES to preserve continuity of the
memory map for MAX_ORDER regions.

Lots of core memory management functionality relies on homogeneity of the
memory map within each pageblock, whose size may differ from MAX_ORDER in
certain configurations.

Although currently, for the architectures that use free_unused_memmap(),
pageblock_order and MAX_ORDER are equivalent, it is cleaner to use a common
notation throughout mm code.

Replace MAX_ORDER_NR_PAGES with pageblock_nr_pages and update the comments
to make it clearer why the alignment to pageblock boundaries is required.

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Tested-by: Tony Lindgren <tony@atomide.com>
Link: https://lore.kernel.org/lkml/20210630071211.21011-1-rppt@kernel.org/
[backport upstream modification in mm/memblock.c to arch/arm/mm/init.c]
Signed-off-by: Mark-PK Tsai <mark-pk.tsai@mediatek.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
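For reference, a minimal user-space sketch (not kernel code) of what the
round_down()/ALIGN() arithmetic in the patch does at pageblock granularity.
The macro definitions mirror the kernel helpers for power-of-two boundaries;
the pageblock size of 512 pages and the pfn values are assumptions chosen
for illustration, not values taken from the patch:

	/*
	 * Sketch of aligning a hole's boundaries outward so that the memory
	 * map stays valid for every whole pageblock touching the hole.
	 */
	#include <stdio.h>

	#define ALIGN(x, a)      (((x) + ((a) - 1)) & ~((a) - 1))
	#define round_down(x, a) ((x) & ~((a) - 1))

	int main(void)
	{
		unsigned long pageblock_nr_pages = 512;	/* assumed example value */
		unsigned long start = 0x12345;		/* pfn where a bank starts */
		unsigned long end   = 0x2468a;		/* pfn just past a bank end */

		/* start is rounded down, end is rounded up, to pageblock edges */
		printf("start rounded down: %#lx\n",
		       round_down(start, pageblock_nr_pages));
		printf("end aligned up:     %#lx\n",
		       ALIGN(end, pageblock_nr_pages));
		return 0;
	}

The patch only swaps the alignment unit (MAX_ORDER_NR_PAGES to
pageblock_nr_pages); the rounding logic itself is unchanged.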
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mm/init.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 75f3ab531bdf..8440b6027598 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -315,11 +315,11 @@ static void __init free_unused_memmap(void)
ALIGN(prev_end, PAGES_PER_SECTION));
#else
/*
- * Align down here since the VM subsystem insists that the
- * memmap entries are valid from the bank start aligned to
- * MAX_ORDER_NR_PAGES.
+ * Align down here since many operations in VM subsystem
+ * presume that there are no holes in the memory map inside
+ * a pageblock
*/
- start = round_down(start, MAX_ORDER_NR_PAGES);
+ start = round_down(start, pageblock_nr_pages);
#endif
/*
* If we had a previous bank, and there is a space
@@ -329,11 +329,11 @@ static void __init free_unused_memmap(void)
free_memmap(prev_end, start);
/*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
+ * Align up here since many operations in VM subsystem
+ * presume that there are no holes in the memory map inside
+ * a pageblock
*/
- prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
+ prev_end = ALIGN(end, pageblock_nr_pages);
}
#ifdef CONFIG_SPARSEMEM