Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--   mm/vmalloc.c   121
1 file changed, 75 insertions, 46 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d0a7d89be091..b2ec7f751bd0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2567,6 +2567,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
BUG_ON(!page);
__free_pages(page, page_order);
+ cond_resched();
}
atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
@@ -2758,6 +2759,54 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
EXPORT_SYMBOL_GPL(vmap_pfn);
#endif /* CONFIG_VMAP_PFN */
+static inline unsigned int
+vm_area_alloc_pages(gfp_t gfp, int nid,
+ unsigned int order, unsigned long nr_pages, struct page **pages)
+{
+ unsigned int nr_allocated = 0;
+
+ /*
+ * For order-0 pages we make use of the bulk allocator. If
+ * the page array is partly or not at all populated due to
+ * allocation failures, fall back to the single page
+ * allocator below, which is more permissive.
+ */
+ if (!order)
+ nr_allocated = alloc_pages_bulk_array_node(
+ gfp, nid, nr_pages, pages);
+ else
+ /*
+ * Compound pages are required for remap_vmalloc_page when
+ * high-order pages are used.
+ */
+ gfp |= __GFP_COMP;
+
+ /* High-order pages or fallback path if "bulk" fails. */
+ while (nr_allocated < nr_pages) {
+ struct page *page;
+ int i;
+
+ page = alloc_pages_node(nid, gfp, order);
+ if (unlikely(!page))
+ break;
+
+ /*
+ * Careful, we allocate and map page-order pages, but
+ * tracking is done per PAGE_SIZE page so as to keep the
+ * vm_struct APIs independent of the physical/mapped size.
+ */
+ for (i = 0; i < (1U << order); i++)
+ pages[nr_allocated + i] = page + i;
+
+ if (gfpflags_allow_blocking(gfp))
+ cond_resched();
+
+ nr_allocated += 1U << order;
+ }
+
+ return nr_allocated;
+}
+
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
pgprot_t prot, unsigned int page_shift,
int node)
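
The helper added above follows a bulk-first, fallback-second shape: try to fill the page array in one bulk call for order-0 requests, then top it up one allocation at a time, and let the caller decide what to do with a partial result. A minimal userspace sketch of that same shape, with plain malloc() standing in for the page allocator and the names bulk_alloc()/alloc_chunks() invented purely for illustration (they are not kernel APIs), could look like this:

	#include <stdlib.h>
	#include <stddef.h>

	/*
	 * Hypothetical "bulk" pass: try to populate the array in one go
	 * and report how many slots were actually filled; a partial
	 * result is acceptable.
	 */
	static size_t bulk_alloc(void **slots, size_t want, size_t chunk)
	{
		size_t i;

		for (i = 0; i < want; i++) {
			slots[i] = malloc(chunk);
			if (!slots[i])
				break;
		}
		return i;
	}

	/*
	 * Same shape as vm_area_alloc_pages(): take whatever the bulk
	 * pass produced, then fall back to one allocation at a time
	 * until the request is satisfied or memory runs out.
	 */
	static size_t alloc_chunks(void **slots, size_t want, size_t chunk)
	{
		size_t done = bulk_alloc(slots, want, chunk);

		while (done < want) {
			void *p = malloc(chunk);

			if (!p)
				break;
			slots[done++] = p;
		}
		return done;
	}

As in __vmalloc_area_node(), the point of this shape is that the allocator reports how far it got instead of failing outright, so the caller can compare the returned count with the request and account for or release a partial allocation.
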
@@ -2768,8 +2817,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
unsigned long array_size;
unsigned int nr_small_pages = size >> PAGE_SHIFT;
unsigned int page_order;
- struct page **pages;
- unsigned int i;
array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
gfp_mask |= __GFP_NOWARN;
@@ -2778,62 +2825,44 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
/* Please note that the recursion is strictly bounded. */
if (array_size > PAGE_SIZE) {
- pages = __vmalloc_node(array_size, 1, nested_gfp, node,
+ area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
area->caller);
} else {
- pages = kmalloc_node(array_size, nested_gfp, node);
+ area->pages = kmalloc_node(array_size, nested_gfp, node);
}
- if (!pages) {
- free_vm_area(area);
+ if (!area->pages) {
warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "page array size %lu allocation failed",
- nr_small_pages * PAGE_SIZE, array_size);
+ "vmalloc error: size %lu, failed to allocated page array size %lu",
+ nr_small_pages * PAGE_SIZE, array_size);
+ free_vm_area(area);
return NULL;
}
- area->pages = pages;
- area->nr_pages = nr_small_pages;
set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
-
page_order = vm_area_page_order(area);
- /*
- * Careful, we allocate and map page_order pages, but tracking is done
- * per PAGE_SIZE page so as to keep the vm_struct APIs independent of
- * the physical/mapped size.
- */
- for (i = 0; i < area->nr_pages; i += 1U << page_order) {
- struct page *page;
- int p;
-
- /* Compound pages required for remap_vmalloc_page */
- page = alloc_pages_node(node, gfp_mask | __GFP_COMP, page_order);
- if (unlikely(!page)) {
- /* Successfully allocated i pages, free them in __vfree() */
- area->nr_pages = i;
- atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
- warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "page order %u allocation failed",
- area->nr_pages * PAGE_SIZE, page_order);
- goto fail;
- }
+ area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
+ page_order, nr_small_pages, area->pages);
- for (p = 0; p < (1U << page_order); p++)
- area->pages[i + p] = page + p;
+ atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
- if (gfpflags_allow_blocking(gfp_mask))
- cond_resched();
+ /*
+ * If not enough pages were obtained to accomplish the
+ * allocation request, free whatever was allocated via __vfree().
+ */
+ if (area->nr_pages != nr_small_pages) {
+ warn_alloc(gfp_mask, NULL,
+ "vmalloc error: size %lu, page order %u, failed to allocate pages",
+ area->nr_pages * PAGE_SIZE, page_order);
+ goto fail;
}
- atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
- if (vmap_pages_range(addr, addr + size, prot, pages, page_shift) < 0) {
+ if (vmap_pages_range(addr, addr + size, prot, area->pages,
+ page_shift) < 0) {
warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "failed to map pages",
- area->nr_pages * PAGE_SIZE);
+ "vmalloc error: size %lu, failed to map pages",
+ area->nr_pages * PAGE_SIZE);
goto fail;
}
@@ -2878,8 +2907,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if ((size >> PAGE_SHIFT) > totalram_pages()) {
warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "exceeds total pages", real_size);
+ "vmalloc error: size %lu, exceeds total pages",
+ real_size);
return NULL;
}
@@ -2910,8 +2939,8 @@ again:
gfp_mask, caller);
if (!area) {
warn_alloc(gfp_mask, NULL,
- "vmalloc size %lu allocation failure: "
- "vm_struct allocation failed", real_size);
+ "vmalloc error: size %lu, vm_struct allocation failed",
+ real_size);
goto fail;
}
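
For context, everything touched by this patch sits behind the ordinary vmalloc()/vfree() interface; callers never see the bulk allocator, the page order, or the new cond_resched() in the free loop. A hypothetical module-style usage that exercises these paths (error handling kept minimal, names invented for the example) might look like:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/errno.h>
	#include <linux/vmalloc.h>

	static void *buf;

	static int __init vmalloc_demo_init(void)
	{
		/* A large request ends up in __vmalloc_area_node(), which
		 * now fills area->pages through vm_area_alloc_pages(). */
		buf = vmalloc(8 * 1024 * 1024);
		return buf ? 0 : -ENOMEM;
	}

	static void __exit vmalloc_demo_exit(void)
	{
		/* __vunmap() releases the pages, with cond_resched()
		 * between frees so large areas do not monopolize the CPU. */
		vfree(buf);
	}

	module_init(vmalloc_demo_init);
	module_exit(vmalloc_demo_exit);
	MODULE_LICENSE("GPL");
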