From f0d279654dea22b7a6ad34b9334aee80cda62cde Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Fri, 15 Aug 2014 16:06:06 -0400
Subject: percpu: fix pcpu_alloc_pages() failure path

When pcpu_alloc_pages() fails midway, pcpu_free_pages() is invoked to
free what has already been allocated.  The invocation is across the
whole requested range and pcpu_free_pages() will try to free all
non-NULL pages; unfortunately, this is incorrect as
pcpu_get_pages_and_bitmap(), unlike what its comment suggests, doesn't
clear the pages array and thus the array may have entries from the
previous invocations making the partial failure path free incorrect
pages.

Fix it by open-coding the partial freeing of the already allocated
pages.

Signed-off-by: Tejun Heo
Cc: stable@vger.kernel.org
---
 mm/percpu-vm.c | 21 +++++++++++++++------
 1 file changed, 15 insertions(+), 6 deletions(-)

diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 3707c71ae4cd..8d9bb2c00c68 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			    int page_start, int page_end)
 {
 	const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
-	unsigned int cpu;
+	unsigned int cpu, tcpu;
 	int i;
 
 	for_each_possible_cpu(cpu) {
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk,
 			struct page **pagep = &pages[pcpu_page_idx(cpu, i)];
 
 			*pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0);
-			if (!*pagep) {
-				pcpu_free_pages(chunk, pages, populated,
-						page_start, page_end);
-				return -ENOMEM;
-			}
+			if (!*pagep)
+				goto err;
 		}
 	}
 	return 0;
+
+err:
+	while (--i >= page_start)
+		__free_page(pages[pcpu_page_idx(cpu, i)]);
+
+	for_each_possible_cpu(tcpu) {
+		if (tcpu == cpu)
+			break;
+		for (i = page_start; i < page_end; i++)
+			__free_page(pages[pcpu_page_idx(tcpu, i)]);
+	}
+	return -ENOMEM;
 }
 
 /**
--
cgit v1.2.3

From 849f5169097e1ba35b90ac9df76b5bb6f9c0aabd Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Fri, 15 Aug 2014 16:06:10 -0400
Subject: percpu: perform tlb flush after pcpu_map_pages() failure

If pcpu_map_pages() fails midway, it unmaps the already mapped pages.
Currently, it doesn't flush tlb after the partial unmapping.  This may
be okay in most cases as the established mapping hasn't been used at
that point but it can go wrong and when it goes wrong it'd be
extremely difficult to track down.

Flush tlb after the partial unmapping.

Signed-off-by: Tejun Heo
Cc: stable@vger.kernel.org
---
 mm/percpu-vm.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 8d9bb2c00c68..51108165f829 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -272,6 +272,7 @@ err:
 		__pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start),
 				   page_end - page_start);
 	}
+	pcpu_post_unmap_tlb_flush(chunk, page_start, page_end);
 	return err;
 }
 
--
cgit v1.2.3
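The two fixes above share one idea: on a mid-loop failure, unwind exactly the
work done by the current call (and nothing that may be stale from earlier
calls), then finish the teardown completely (including the tlb flush) before
returning.  The following stand-alone C program is a minimal user-space
sketch of that goto-err unwind structure, not kernel code; the names, sizes,
and the two-level loop are invented stand-ins for the per-cpu/per-page loops
in pcpu_alloc_pages().

/*
 * Sketch of the unwind pattern used by the pcpu_alloc_pages() fix:
 * on a mid-loop allocation failure, free only what this call actually
 * allocated -- first the partially filled "current" row, then every
 * row completed before it -- instead of sweeping the whole array,
 * which may still hold stale pointers from earlier calls.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_UNITS 4	/* stand-in for for_each_possible_cpu() */
#define NR_PAGES 8	/* stand-in for page_start..page_end */

static int alloc_all(void *bufs[NR_UNITS][NR_PAGES])
{
	int unit, i, tunit;

	for (unit = 0; unit < NR_UNITS; unit++) {
		for (i = 0; i < NR_PAGES; i++) {
			bufs[unit][i] = malloc(4096);
			if (!bufs[unit][i])
				goto err;
		}
	}
	return 0;

err:
	/* undo the partially filled row of the failing unit ... */
	while (--i >= 0)
		free(bufs[unit][i]);
	/* ... then every row completed before the failing one */
	for (tunit = 0; tunit < unit; tunit++)
		for (i = 0; i < NR_PAGES; i++)
			free(bufs[tunit][i]);
	return -1;
}

int main(void)
{
	void *bufs[NR_UNITS][NR_PAGES] = { { NULL } };

	if (alloc_all(bufs))
		fprintf(stderr, "allocation failed, partial state unwound\n");
	return 0;
}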
From 3189eddbcafcc4d827f7f19facbeddec4424eba8 Mon Sep 17 00:00:00 2001
From: Honggang Li
Date: Tue, 12 Aug 2014 21:36:15 +0800
Subject: percpu: free percpu allocation info for uniprocessor system

Currently, only SMP systems free the percpu allocation info.
Uniprocessor systems should free it too.  For example, on an x86 UML
virtual machine with 256MB of memory, the UML kernel wastes one page
of memory.

Signed-off-by: Honggang Li
Signed-off-by: Tejun Heo
Cc: stable@vger.kernel.org
---
 mm/percpu.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/mm/percpu.c b/mm/percpu.c
index 2139e30a4b44..da997f9800bd 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1932,6 +1932,8 @@ void __init setup_per_cpu_areas(void)
 
 	if (pcpu_setup_first_chunk(ai, fc) < 0)
 		panic("Failed to initialize percpu areas.");
+
+	pcpu_free_alloc_info(ai);
 }
 
 #endif	/* CONFIG_SMP */
--
cgit v1.2.3
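The last patch restores symmetry between the SMP and !SMP boot paths: the
pcpu_alloc_info descriptor is boot-time scratch data that
pcpu_setup_first_chunk() only needs while it runs, so both paths should drop
it once setup succeeds.  Below is a minimal user-space sketch of that
"free the setup descriptor after init" pattern; the struct, helpers, and
sizes are hypothetical stand-ins, not the real kernel API.

/*
 * Sketch: build a temporary setup descriptor, hand it to the one-time
 * init routine, and free it as soon as init has consumed it.  Only the
 * long-lived state (here the "first chunk" buffer) survives setup.
 */
#include <stdio.h>
#include <stdlib.h>

struct alloc_info {
	size_t unit_size;	/* placeholder for the real descriptor fields */
};

static struct alloc_info *build_alloc_info(size_t unit_size)
{
	struct alloc_info *ai = malloc(sizeof(*ai));

	if (ai)
		ai->unit_size = unit_size;
	return ai;
}

static int setup_first_chunk(const struct alloc_info *ai, void *base)
{
	/* copy whatever is needed out of *ai into long-lived state */
	(void)base;
	return ai ? 0 : -1;
}

static void setup_per_cpu_areas(void)
{
	struct alloc_info *ai = build_alloc_info(4096);
	void *base = malloc(4096);	/* stands in for the first chunk */

	if (!ai || !base || setup_first_chunk(ai, base) < 0) {
		fprintf(stderr, "Failed to initialize percpu areas.\n");
		exit(1);
	}
	/* the descriptor is scratch data; free it once setup is done */
	free(ai);
}

int main(void)
{
	setup_per_cpu_areas();
	return 0;
}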