author		Christoph Hellwig <hch@lst.de>	2017-12-29 10:53:54 +0300
committer	Dan Williams <dan.j.williams@intel.com>	2018-01-08 22:46:23 +0300
commit		7b73d978a5d0d2a3637bdd57191cb6ffbad3feca (patch)
tree		dfcdd031e80b541ad356ccdc5edc171f9d202fe2 /mm/sparse.c
parent		24e6d5a59ac7d31adc0322de2d0117dfa370936f (diff)
download	linux-7b73d978a5d0d2a3637bdd57191cb6ffbad3feca.tar.xz
mm: pass the vmem_altmap to vmemmap_populate
We can just pass this on instead of having to do a radix tree lookup
without proper locking a few levels into the callchain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
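The refactoring pattern here, as a standalone C sketch (not kernel code; the two-field struct and the helper names are illustrative only): instead of a leaf function recovering its context from shared global state — in the kernel, a radix tree lookup via to_vmem_altmap() without proper locking — the caller that already owns the context passes it down the call chain explicitly, with NULL marking the paths that have none.

	/*
	 * Illustrative only: models the shape of this patch, not the
	 * kernel implementation. The altmap travels as a plain argument
	 * rather than being looked up at the leaf.
	 */
	#include <stdio.h>

	struct vmem_altmap { unsigned long base_pfn, free; };

	static void populate(unsigned long pnum, int nid,
			     struct vmem_altmap *altmap)
	{
		if (altmap)
			printf("section %lu, node %d: allocate from altmap at pfn %#lx\n",
			       pnum, nid, altmap->base_pfn);
		else
			printf("section %lu, node %d: allocate normally\n",
			       pnum, nid);
	}

	static void add_one_section(unsigned long pnum, int nid,
				    struct vmem_altmap *altmap)
	{
		populate(pnum, nid, altmap);	/* no lookup: just pass it on */
	}

	int main(void)
	{
		struct vmem_altmap altmap = { .base_pfn = 0x100000, .free = 512 };

		add_one_section(1, 0, NULL);	/* boot-time path: no altmap */
		add_one_section(2, 0, &altmap);	/* device-memory hotplug path */
		return 0;
	}

The NULL convention is visible throughout the diff below: the early boot callers pass NULL, and only the hotplug entry point forwards a real altmap.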
Diffstat (limited to 'mm/sparse.c')
 mm/sparse.c | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index 7a5dacaa06e3..5f4a0dac7836 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -417,7 +417,8 @@ static void __init sparse_early_usemaps_alloc_node(void *data,
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	struct page *map;
 	unsigned long size;
@@ -472,7 +473,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map,
 		if (!present_section_nr(pnum))
 			continue;
 
-		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
+		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid, NULL);
 		if (map_map[pnum])
 			continue;
 		ms = __nr_to_section(pnum);
@@ -500,7 +501,7 @@ static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	struct mem_section *ms = __nr_to_section(pnum);
 	int nid = sparse_early_nid(ms);
 
-	map = sparse_mem_map_populate(pnum, nid);
+	map = sparse_mem_map_populate(pnum, nid, NULL);
 	if (map)
 		return map;
 
@@ -678,10 +679,11 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	/* This will make the necessary allocations eventually. */
-	return sparse_mem_map_populate(pnum, nid);
+	return sparse_mem_map_populate(pnum, nid, altmap);
 }
 static void __kfree_section_memmap(struct page *memmap)
 {
@@ -721,7 +723,8 @@ got_map_ptr:
 	return ret;
 }
 
-static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+		struct vmem_altmap *altmap)
 {
 	return __kmalloc_section_memmap();
 }
@@ -773,7 +776,8 @@ static void free_map_bootmem(struct page *memmap)
  * set.  If this is <=0, then that means that the passed-in
  * map was not consumed and must be freed.
  */
-int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
+int __meminit sparse_add_one_section(struct pglist_data *pgdat,
+		unsigned long start_pfn, struct vmem_altmap *altmap)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct mem_section *ms;
@@ -789,7 +793,7 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long st
 	ret = sparse_index_init(section_nr, pgdat->node_id);
 	if (ret < 0 && ret != -EEXIST)
 		return ret;
-	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, altmap);
 	if (!memmap)
 		return -ENOMEM;
 	usemap = __kmalloc_section_usemap();
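
With the extra parameter in place, the hotplug path can thread the altmap from the original caller (e.g. devm_memremap_pages()) down to the memmap allocation instead of looking it up at the leaf. A sketch of the resulting call site, assuming the companion mm/memory_hotplug.c change from this same series (that plumbing is not part of this file's diff):

	/* hotplug path: forward the caller's altmap into the new section */
	ret = sparse_add_one_section(NODE_DATA(nid), phys_start_pfn, altmap);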