Diffstat (limited to 'net/core')
-rw-r--r--  net/core/page_pool.c  |  82 ++++++++++++++++++++++++++++++-----------
 1 file changed, 56 insertions(+), 26 deletions(-)
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 40e1b2beaa6c..9ec1aa9640ad 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -203,38 +203,17 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
return true;
}
-/* slow path */
-noinline
-static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
- gfp_t _gfp)
+static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
+ gfp_t gfp)
{
- unsigned int pp_flags = pool->p.flags;
struct page *page;
- gfp_t gfp = _gfp;
-
- /* We could always set __GFP_COMP, and avoid this branch, as
- * prep_new_page() can handle order-0 with __GFP_COMP.
- */
- if (pool->p.order)
- gfp |= __GFP_COMP;
-
- /* FUTURE development:
- *
- * Current slow-path essentially falls back to single page
- * allocations, which doesn't improve performance. This code
- * need bulk allocation support from the page allocator code.
- */
- /* Cache was empty, do real allocation */
-#ifdef CONFIG_NUMA
+ gfp |= __GFP_COMP;
page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
-#else
- page = alloc_pages(gfp, pool->p.order);
-#endif
- if (!page)
+ if (unlikely(!page))
return NULL;
- if ((pp_flags & PP_FLAG_DMA_MAP) &&
+ if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
unlikely(!page_pool_dma_map(pool, page))) {
put_page(page);
return NULL;
@@ -243,6 +222,57 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
/* Track how many pages are held 'in-flight' */
pool->pages_state_hold_cnt++;
trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
+ return page;
+}
+
+/* slow path */
+noinline
+static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
+ gfp_t gfp)
+{
+ const int bulk = PP_ALLOC_CACHE_REFILL;
+ unsigned int pp_flags = pool->p.flags;
+ unsigned int pp_order = pool->p.order;
+ struct page *page;
+ int i, nr_pages;
+
+ /* Don't support bulk alloc for high-order pages */
+ if (unlikely(pp_order))
+ return __page_pool_alloc_page_order(pool, gfp);
+
+ /* Unnecessary as alloc cache is empty, but guarantees zero count */
+ if (unlikely(pool->alloc.count > 0))
+ return pool->alloc.cache[--pool->alloc.count];
+
+ /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
+ memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);
+
+ nr_pages = alloc_pages_bulk_array(gfp, bulk, pool->alloc.cache);
+ if (unlikely(!nr_pages))
+ return NULL;
+
+ /* Pages have been filled into the alloc.cache array, but the count is
+ * zero and the page elements have not yet been (possibly) DMA mapped.
+ */
+ for (i = 0; i < nr_pages; i++) {
+ page = pool->alloc.cache[i];
+ if ((pp_flags & PP_FLAG_DMA_MAP) &&
+ unlikely(!page_pool_dma_map(pool, page))) {
+ put_page(page);
+ continue;
+ }
+ pool->alloc.cache[pool->alloc.count++] = page;
+ /* Track how many pages are held 'in-flight' */
+ pool->pages_state_hold_cnt++;
+ trace_page_pool_state_hold(pool, page,
+ pool->pages_state_hold_cnt);
+ }
+
+ /* Return last page */
+ if (likely(pool->alloc.count > 0))
+ page = pool->alloc.cache[--pool->alloc.count];
+ else
+ page = NULL;
/* When a page is just alloc'ed it should/must have refcnt 1. */
return page;
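
The bulk path above relies on the calling convention of alloc_pages_bulk_array(): the caller passes an array whose empty slots are NULL, the allocator fills only those slots, and the return value is the number of populated entries, which may be fewer than requested or zero. That is why the patch memsets the cache slots first and then loops only over the first nr_pages entries. The following is a minimal, hedged sketch of that convention outside of page_pool; example_bulk_fill() and the GFP_ATOMIC choice are illustrative assumptions, not part of the patch.

/* Illustrative sketch only -- not part of the patch.  Shows the
 * alloc_pages_bulk_array() calling convention the slow path depends on:
 * NULL slots are treated as free, and the return value is the number of
 * entries that were actually filled.
 */
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static int example_bulk_fill(struct page **pages, unsigned int want)
{
	unsigned long got;

	/* Empty slots must be NULL so the allocator knows it may fill them. */
	memset(pages, 0, sizeof(*pages) * want);

	got = alloc_pages_bulk_array(GFP_ATOMIC, want, pages);
	if (!got)
		return -ENOMEM;		/* not even a single page was available */

	/* Callers must tolerate a partial fill (got < want), just as the
	 * page_pool slow path does by looping only over the first 'got' pages.
	 */
	return got;
}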
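For context, a hedged sketch of the consumer side follows: a driver creates a pool with page_pool_create() and pulls pages with page_pool_dev_alloc_pages(); once the alloc.cache array and the pool's ptr_ring are both empty, such a request falls through to the __page_pool_alloc_pages_slow() path shown above. The function name example_create_pool() and the parameter values are placeholders chosen for illustration; only the struct page_pool_params fields and PP_FLAG_DMA_MAP come from the page_pool API itself.

/* Illustrative sketch only -- not part of the patch.  Minimal driver-side
 * setup whose order-0 allocations end up in the bulk slow path above once
 * the pool's caches are empty.  Exact values are placeholders.
 */
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/numa.h>
#include <net/page_pool.h>

static struct page_pool *example_create_pool(struct device *dev)
{
	struct page_pool_params pp_params = {
		.flags		= PP_FLAG_DMA_MAP,	/* pool DMA-maps pages itself */
		.order		= 0,			/* order-0, so the bulk path applies */
		.pool_size	= 256,
		.nid		= NUMA_NO_NODE,
		.dev		= dev,
		.dma_dir	= DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp_params);	/* returns ERR_PTR() on failure */
}

In the RX refill loop, each page_pool_dev_alloc_pages(pool) call is served from alloc.cache when possible; the patch simply makes the refill of that cache use one bulk allocation instead of repeated single-page allocations.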