author		Dennis Zhou (Facebook) <dennisszhou@gmail.com>	2017-07-25 02:02:01 +0300
committer	Tejun Heo <tj@kernel.org>	2017-07-26 17:23:52 +0300
commit		b9c39442ceffb202b98a88d492347eae125c5ba2 (patch)
tree		4d76780f72dcbaa017b7245c2dde295533670907 /mm
parent		4af1e6fbd8e46dc44a89edf215207fd7c8d5cdda (diff)
download	linux-b9c39442ceffb202b98a88d492347eae125c5ba2.tar.xz
percpu: setup_first_chunk remove dyn_size and consolidate logic
There is logic for setting variables in the static chunk init code that could be consolidated with the dynamic chunk init code. This patch consolidates that logic to set up for combining the allocation paths. reserved_size is used as the conditional, as a dynamic region will always exist.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
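Illustrative note (not part of the original commit message): the new assignment relies on the GNU C conditional with an omitted middle operand, where "x ?: y" evaluates to x when x is nonzero and to y otherwise. Below is a minimal user-space sketch of the consolidated selection logic; struct alloc_info and first_chunk_free_size are hypothetical stand-ins for illustration only, not kernel APIs.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for the pcpu_alloc_info fields used by the patch. */
struct alloc_info {
	size_t reserved_size;	/* reserved percpu region size, may be 0 */
	size_t dyn_size;	/* dynamic percpu region size, always nonzero */
};

/*
 * Mirrors schunk->free_size = ai->reserved_size ?: ai->dyn_size;
 * the first chunk serves the reserved region when one is configured,
 * otherwise it covers the dynamic region directly.
 */
static size_t first_chunk_free_size(const struct alloc_info *ai)
{
	return ai->reserved_size ? ai->reserved_size : ai->dyn_size;
}

int main(void)
{
	struct alloc_info with_reserved = { .reserved_size = 8192, .dyn_size = 28672 };
	struct alloc_info no_reserved = { .reserved_size = 0, .dyn_size = 28672 };

	printf("%zu\n", first_chunk_free_size(&with_reserved));	/* 8192 */
	printf("%zu\n", first_chunk_free_size(&no_reserved));	/* 28672 */
	return 0;
}

With reserved_size as the conditional, the dynamic chunk init no longer needs a local dyn_size copy that gets zeroed once the static chunk covers the dynamic area; ai->dyn_size is read directly instead.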
Diffstat (limited to 'mm')
-rw-r--r--	mm/percpu.c | 18
1 file changed, 6 insertions(+), 12 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index 470e1a084a7c..851aa8109788 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1562,8 +1562,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 {
 	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
 	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
-	size_t dyn_size = ai->dyn_size;
-	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
+	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
 	struct pcpu_chunk *schunk, *dchunk = NULL;
 	unsigned long *group_offsets;
 	size_t *group_sizes;
@@ -1690,14 +1689,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	bitmap_fill(schunk->populated, pcpu_unit_pages);
 	schunk->nr_populated = pcpu_unit_pages;
-	if (ai->reserved_size) {
-		schunk->free_size = ai->reserved_size;
-		pcpu_reserved_chunk = schunk;
-	} else {
-		schunk->free_size = dyn_size;
-		dyn_size = 0;			/* dynamic area covered */
-	}
-
+	schunk->free_size = ai->reserved_size ?: ai->dyn_size;
 	schunk->contig_hint = schunk->free_size;
 	schunk->map[0] = 1;
 	schunk->map[1] = schunk->start_offset;
@@ -1705,7 +1697,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	schunk->map_used = 2;
 	/* init dynamic chunk if necessary */
-	if (dyn_size) {
+	if (ai->reserved_size) {
+		pcpu_reserved_chunk = schunk;
+
 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 		INIT_LIST_HEAD(&dchunk->list);
 		INIT_LIST_HEAD(&dchunk->map_extend_list);
@@ -1717,7 +1711,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 		bitmap_fill(dchunk->populated, pcpu_unit_pages);
 		dchunk->nr_populated = pcpu_unit_pages;
-		dchunk->contig_hint = dchunk->free_size = dyn_size;
+		dchunk->contig_hint = dchunk->free_size = ai->dyn_size;
 		dchunk->map[0] = 1;
 		dchunk->map[1] = dchunk->start_offset;
 		dchunk->map[2] = (dchunk->start_offset + dchunk->free_size) | 1;