Diffstat (limited to 'net/core/xdp.c')
 net/core/xdp.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/net/core/xdp.c b/net/core/xdp.c
index d900cebc0acd..3a8c9ab4ecbe 100644
--- a/net/core/xdp.c
+++ b/net/core/xdp.c
@@ -158,7 +158,7 @@ static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
 
 /* Returns 0 on success, negative on failure */
 int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
-		     struct net_device *dev, u32 queue_index)
+		     struct net_device *dev, u32 queue_index, unsigned int napi_id)
 {
 	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
 		WARN(1, "Driver promised not to register this");
@@ -179,6 +179,7 @@ int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
 	xdp_rxq_info_init(xdp_rxq);
 	xdp_rxq->dev = dev;
 	xdp_rxq->queue_index = queue_index;
+	xdp_rxq->napi_id = napi_id;
 	xdp_rxq->reg_state = REG_STATE_REGISTERED;
 	return 0;
 }
@@ -383,6 +384,60 @@ void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 
+/* XDP bulk APIs introduce a defer/flush mechanism to return
+ * pages belonging to the same xdp_mem_allocator object
+ * (identified via the mem.id field) in bulk to optimize
+ * I-cache and D-cache.
+ * The bulk queue size is set to 16 to be aligned to how
+ * XDP_REDIRECT bulking works. The bulk is flushed when
+ * it is full or when mem.id changes.
+ * xdp_frame_bulk is usually stored/allocated on the function
+ * call-stack to avoid locking penalties.
+ */
+void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq)
+{
+	struct xdp_mem_allocator *xa = bq->xa;
+
+	if (unlikely(!xa || !bq->count))
+		return;
+
+	page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count);
+	/* bq->xa is not cleared to save lookup, if mem.id same in next bulk */
+	bq->count = 0;
+}
+EXPORT_SYMBOL_GPL(xdp_flush_frame_bulk);
+
+/* Must be called with rcu_read_lock held */
+void xdp_return_frame_bulk(struct xdp_frame *xdpf,
+			   struct xdp_frame_bulk *bq)
+{
+	struct xdp_mem_info *mem = &xdpf->mem;
+	struct xdp_mem_allocator *xa;
+
+	if (mem->type != MEM_TYPE_PAGE_POOL) {
+		__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
+		return;
+	}
+
+	xa = bq->xa;
+	if (unlikely(!xa)) {
+		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+		bq->count = 0;
+		bq->xa = xa;
+	}
+
+	if (bq->count == XDP_BULK_QUEUE_SIZE)
+		xdp_flush_frame_bulk(bq);
+
+	if (unlikely(mem->id != xa->mem.id)) {
+		xdp_flush_frame_bulk(bq);
+		bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
+	}
+
+	bq->q[bq->count++] = xdpf->data;
+}
+EXPORT_SYMBOL_GPL(xdp_return_frame_bulk);
+
 void xdp_return_buff(struct xdp_buff *xdp)
 {
 	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
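
Usage note (illustration only, not part of the patch above): the comment block in the diff describes the defer/flush contract, and the sketch below shows how a driver's XDP TX-completion path could consume the two new exports. The driver function name, the frames array, and the zero-initialization of the on-stack bulk queue are assumptions; only xdp_return_frame_bulk(), xdp_flush_frame_bulk() and the rcu_read_lock requirement come from this diff, and struct xdp_frame_bulk is assumed to be provided by include/net/xdp.h.

#include <linux/rcupdate.h>
#include <net/xdp.h>

/* Hypothetical TX-completion helper: hand every completed frame to the
 * bulk API so page_pool pages sharing the same mem.id are returned in
 * batches of up to XDP_BULK_QUEUE_SIZE instead of one by one.
 */
static void mydrv_clean_xdp_tx(struct xdp_frame **frames, int n)
{
	struct xdp_frame_bulk bq = {};	/* count = 0, xa = NULL, as the flush path expects */
	int i;

	rcu_read_lock();		/* xdp_return_frame_bulk() must run under rcu_read_lock */

	for (i = 0; i < n; i++)
		xdp_return_frame_bulk(frames[i], &bq);

	/* push out whatever is still queued before leaving the RCU section */
	xdp_flush_frame_bulk(&bq);

	rcu_read_unlock();
}

Keeping the xdp_frame_bulk on the caller's stack follows the recommendation in the patch comment: the queue needs no locking of its own, and bq->xa caches the last xdp_mem_allocator lookup across frames that share a mem.id.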