author	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-15 23:53:37 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-15 23:53:37 +0300
commit	ac73e3dc8acd0a3be292755db30388c3580f5674 (patch)
tree	5abef6cb82b205b5dbbb69dca950b8a5aae716de /arch/ia64/mm/discontig.c
parent	148842c98a24e508aecb929718818fbf4c2a6ff3 (diff)
parent	dfefd226b0bf7c435a58d75a0ce2f9273b9825f6 (diff)
download	linux-ac73e3dc8acd0a3be292755db30388c3580f5674.tar.xz
Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - a few random little subsystems

 - almost all of the MM patches which are staged ahead of linux-next
   material. I'll trickle to post-linux-next work in as the dependents
   get merged up.

Subsystems affected by this patch series: kthread, kbuild, ide, ntfs,
ocfs2, arch, and mm (slab-generic, slab, slub, dax, debug, pagecache,
gup, swap, shmem, memcg, pagemap, mremap, hmm, vmalloc, documentation,
kasan, pagealloc, memory-failure, hugetlb, vmscan, z3fold, compaction,
oom-kill, migration, cma, page-poison, userfaultfd, zswap, zsmalloc,
uaccess, zram, and cleanups).

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (200 commits)
  mm: cleanup kstrto*() usage
  mm: fix fall-through warnings for Clang
  mm: slub: convert sysfs sprintf family to sysfs_emit/sysfs_emit_at
  mm: shmem: convert shmem_enabled_show to use sysfs_emit_at
  mm:backing-dev: use sysfs_emit in macro defining functions
  mm: huge_memory: convert remaining use of sprintf to sysfs_emit and neatening
  mm: use sysfs_emit for struct kobject * uses
  mm: fix kernel-doc markups
  zram: break the strict dependency from lzo
  zram: add stat to gather incompressible pages since zram set up
  zram: support page writeback
  mm/process_vm_access: remove redundant initialization of iov_r
  mm/zsmalloc.c: rework the list_add code in insert_zspage()
  mm/zswap: move to use crypto_acomp API for hardware acceleration
  mm/zswap: fix passing zero to 'PTR_ERR' warning
  mm/zswap: make struct kernel_param_ops definitions const
  userfaultfd/selftests: hint the test runner on required privilege
  userfaultfd/selftests: fix retval check for userfaultfd_open()
  userfaultfd/selftests: always dump something in modes
  userfaultfd: selftests: make __{s,u}64 format specifiers portable
  ...
Diffstat (limited to 'arch/ia64/mm/discontig.c')
-rw-r--r--	arch/ia64/mm/discontig.c	44
1 files changed, 21 insertions, 23 deletions
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index dbe829fc5298..c7311131156e 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -584,6 +584,25 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
 	}
 }
 
+static void __init virtual_map_init(void)
+{
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+	int node;
+
+	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+		sizeof(struct page));
+	vmem_map = (struct page *) VMALLOC_END;
+	efi_memmap_walk(create_mem_map_page_table, NULL);
+	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
+
+	for_each_online_node(node) {
+		unsigned long pfn_offset = mem_data[node].min_pfn;
+
+		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
+	}
+#endif
+}
+
 /**
  * paging_init - setup page tables
  *
@@ -593,38 +612,17 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
 void __init paging_init(void)
 {
 	unsigned long max_dma;
-	unsigned long pfn_offset = 0;
-	unsigned long max_pfn = 0;
-	int node;
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
 	sparse_init();
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
-		sizeof(struct page));
-	vmem_map = (struct page *) VMALLOC_END;
-	efi_memmap_walk(create_mem_map_page_table, NULL);
-	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
-#endif
-
-	for_each_online_node(node) {
-		pfn_offset = mem_data[node].min_pfn;
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
-#endif
-		if (mem_data[node].max_pfn > max_pfn)
-			max_pfn = mem_data[node].max_pfn;
-	}
+	virtual_map_init();
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA32
 	max_zone_pfns[ZONE_DMA32] = max_dma;
-#endif
-	max_zone_pfns[ZONE_NORMAL] = max_pfn;
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 	free_area_init(max_zone_pfns);
 
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
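
Net effect of the two hunks: the CONFIG_VIRTUAL_MEM_MAP setup moves out of
paging_init() into the new virtual_map_init() helper, and ZONE_NORMAL is
sized from max_low_pfn instead of a locally computed max_pfn. For reference,
paging_init() after this patch reads roughly as follows; this is a sketch
reconstructed from the context and '+' lines above, with indentation and the
closing brace supplied by hand rather than taken from the hunk:

/* paging_init() as it reads after this patch (reconstructed from the diff). */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	sparse_init();

	/* CONFIG_VIRTUAL_MEM_MAP handling now lives in virtual_map_init(). */
	virtual_map_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA32] = max_dma;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}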