Diffstat (limited to 'mm/memblock.c')
 mm/memblock.c | 70
 1 file changed, 69 insertions(+), 1 deletion(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index 625905523c2a..1bcd9b970564 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -92,9 +92,58 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
*
* Find @size free area aligned to @align in the specified range and node.
*
+ * If we have CONFIG_HAVE_MEMBLOCK_NODE_MAP defined, we need to check that
+ * the memory we found is not in hotpluggable ranges.
+ *
* RETURNS:
* Found address on success, %0 on failure.
*/
+#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
+ phys_addr_t end, phys_addr_t size,
+ phys_addr_t align, int nid)
+{
+ phys_addr_t this_start, this_end, cand;
+ u64 i;
+ int curr = movablemem_map.nr_map - 1;
+
+ /* pump up @end */
+ if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
+ end = memblock.current_limit;
+
+ /* avoid allocating the first page */
+ start = max_t(phys_addr_t, start, PAGE_SIZE);
+ end = max(start, end);
+
+ for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
+ this_start = clamp(this_start, start, end);
+ this_end = clamp(this_end, start, end);
+
+restart:
+ if (this_end <= this_start || this_end < size)
+ continue;
+
+ for (; curr >= 0; curr--) {
+ if ((movablemem_map.map[curr].start_pfn << PAGE_SHIFT)
+ < this_end)
+ break;
+ }
+
+ cand = round_down(this_end - size, align);
+ if (curr >= 0 &&
+ cand < movablemem_map.map[curr].end_pfn << PAGE_SHIFT) {
+ this_end = movablemem_map.map[curr].start_pfn
+ << PAGE_SHIFT;
+ goto restart;
+ }
+
+ if (cand >= this_start)
+ return cand;
+ }
+
+ return 0;
+}
+#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
phys_addr_t end, phys_addr_t size,
phys_addr_t align, int nid)
@@ -123,6 +172,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
}
return 0;
}
+#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
* memblock_find_in_range - find free area in given range
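To make the hotplug-aware path in the hunk above easier to follow, here is a minimal userspace sketch of the same top-down search: pick the highest aligned candidate in a free range and, whenever it would land inside a range to be avoided, drop the upper bound to that range's start and retry. The avoid_region struct, avoid_map[] table and find_candidate() helper are hypothetical stand-ins that only mirror the shape of movablemem_map; they are not the kernel structures, and the for_each_free_mem_range_reverse() iteration is omitted.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t phys_addr_t;

/* Hypothetical stand-in for one movablemem_map entry. */
struct avoid_region {
	phys_addr_t start;
	phys_addr_t end;
};

/* Ranges to keep allocations out of, sorted by address. */
static struct avoid_region avoid_map[] = {
	{ 0x40000000ULL, 0x80000000ULL },	/* e.g. a hotpluggable DIMM */
};
static const int avoid_nr = 1;

static phys_addr_t round_down_pow2(phys_addr_t x, phys_addr_t align)
{
	return x & ~(align - 1);	/* align must be a power of two */
}

/* Highest @size bytes in [start, end) aligned to @align that avoid
 * every avoid_map entry; 0 on failure. */
static phys_addr_t find_candidate(phys_addr_t start, phys_addr_t end,
				  phys_addr_t size, phys_addr_t align)
{
	int curr = avoid_nr - 1;
	phys_addr_t cand;

restart:
	if (end <= start || end - start < size)
		return 0;

	/* walk down to the highest avoid region starting below @end */
	while (curr >= 0 && avoid_map[curr].start >= end)
		curr--;

	cand = round_down_pow2(end - size, align);
	if (curr >= 0 && cand < avoid_map[curr].end) {
		/* candidate overlaps an avoided range: retry below it */
		end = avoid_map[curr].start;
		goto restart;
	}
	return cand >= start ? cand : 0;
}

int main(void)
{
	/* 16MB below 2GB: the avoided [1GB, 2GB) window forces a restart,
	 * so the candidate lands just below 1GB (0x3f000000). */
	printf("candidate: %#llx\n",
	       (unsigned long long)find_candidate(0x1000, 0x80000000ULL,
						   0x1000000, 0x200000));
	return 0;
}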
@@ -314,7 +364,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
}
this->size += next->size;
- memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+	/* move forward from next + 1, whose index is i + 2 */
+ memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
type->cnt--;
}
}
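The memmove change above fixes an off-by-one in how many trailing entries get shifted down. Once region i+1 has been folded into region i, only the entries at indices i+2 .. cnt-1 still need to move, i.e. cnt - (i + 2) of them; the old cnt - (i + 1) count copied one entry too many and read past the last populated slot. A standalone sketch of the arithmetic, using a plain int array and a hypothetical merge_into() helper in place of struct memblock_region and the real merge loop:

#include <stdio.h>
#include <string.h>

/* Fold entry i+1 of arr[] into entry i and close the gap. */
static void merge_into(int *arr, int *cnt, int i)
{
	arr[i] += arr[i + 1];
	/* Slide entries i+2 .. cnt-1 down one slot: that is cnt - (i + 2)
	 * entries; the old cnt - (i + 1) count would also copy the stale
	 * slot just past the populated part of the array. */
	memmove(&arr[i + 1], &arr[i + 2], (*cnt - (i + 2)) * sizeof(arr[0]));
	(*cnt)--;
}

int main(void)
{
	int regions[] = { 10, 20, 30, 40, 50 };
	int cnt = 5;

	merge_into(regions, &cnt, 1);	/* merge entries 1 and 2 */
	for (int i = 0; i < cnt; i++)
		printf("%d ", regions[i]);	/* prints: 10 50 40 50 */
	printf("\n");
	return 0;
}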
@@ -827,6 +878,23 @@ phys_addr_t __init memblock_phys_mem_size(void)
return memblock.memory.total_size;
}
+phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
+{
+ unsigned long pages = 0;
+ struct memblock_region *r;
+ unsigned long start_pfn, end_pfn;
+
+ for_each_memblock(memory, r) {
+ start_pfn = memblock_region_memory_base_pfn(r);
+ end_pfn = memblock_region_memory_end_pfn(r);
+ start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
+ end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
+ pages += end_pfn - start_pfn;
+ }
+
+ return (phys_addr_t)pages << PAGE_SHIFT;
+}
+
/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
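The new memblock_mem_size() helper above simply clamps every memory region's pfn range to limit_pfn and accumulates the pages that survive. A minimal userspace sketch of that clamping sum, with a hypothetical mem_regions[] table and mem_size_below() helper standing in for the memblock region list and the new function (4K pages assumed):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12

/* Hypothetical stand-in for a memblock memory region, in pfn units. */
struct region {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static struct region mem_regions[] = {
	{ 0x0010, 0x0800 },	/* below the limit: counted in full */
	{ 0x1000, 0x3000 },	/* straddles the limit: counted up to it */
	{ 0x4000, 0x5000 },	/* above the limit: contributes nothing */
};

static uint64_t mem_size_below(unsigned long limit_pfn)
{
	uint64_t pages = 0;
	unsigned int i;

	for (i = 0; i < sizeof(mem_regions) / sizeof(mem_regions[0]); i++) {
		unsigned long start = mem_regions[i].start_pfn;
		unsigned long end = mem_regions[i].end_pfn;

		/* clamp both ends to limit_pfn, as the patch does with min_t() */
		if (start > limit_pfn)
			start = limit_pfn;
		if (end > limit_pfn)
			end = limit_pfn;
		pages += end - start;
	}
	return pages << PAGE_SHIFT;
}

int main(void)
{
	/* 0x7f0 + 0x1000 pages below pfn 0x2000 -> 0x17f0000 bytes */
	printf("%#llx\n", (unsigned long long)mem_size_below(0x2000));
	return 0;
}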