Diffstat (limited to 'drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c')
-rw-r--r-- | drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 433
1 file changed, 329 insertions, 104 deletions
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index e6948939ccc2..5314c064ceae 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -1,5 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0+
 
+#include <linux/bpf.h>
+#include <linux/filter.h>
+
 #include "lan966x_main.h"
 
 static int lan966x_fdma_channel_active(struct lan966x *lan966x)
@@ -10,50 +13,39 @@ static int lan966x_fdma_channel_active(struct lan966x *lan966x)
 static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
 					       struct lan966x_db *db)
 {
-	struct lan966x *lan966x = rx->lan966x;
-	dma_addr_t dma_addr;
 	struct page *page;
 
-	page = dev_alloc_pages(rx->page_order);
+	page = page_pool_dev_alloc_pages(rx->page_pool);
 	if (unlikely(!page))
 		return NULL;
 
-	dma_addr = dma_map_page(lan966x->dev, page, 0,
-				PAGE_SIZE << rx->page_order,
-				DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(lan966x->dev, dma_addr)))
-		goto free_page;
-
-	db->dataptr = dma_addr;
+	db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
 
 	return page;
-
-free_page:
-	__free_pages(page, rx->page_order);
-	return NULL;
 }
 
 static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
 {
-	struct lan966x *lan966x = rx->lan966x;
-	struct lan966x_rx_dcb *dcb;
-	struct lan966x_db *db;
 	int i, j;
 
 	for (i = 0; i < FDMA_DCB_MAX; ++i) {
-		dcb = &rx->dcbs[i];
-
-		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
-			db = &dcb->db[j];
-			dma_unmap_single(lan966x->dev,
-					 (dma_addr_t)db->dataptr,
-					 PAGE_SIZE << rx->page_order,
-					 DMA_FROM_DEVICE);
-			__free_pages(rx->page[i][j], rx->page_order);
-		}
+		for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
+			page_pool_put_full_page(rx->page_pool,
+						rx->page[i][j], false);
 	}
 }
 
+static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
+{
+	struct page *page;
+
+	page = rx->page[rx->dcb_index][rx->db_index];
+	if (unlikely(!page))
+		return;
+
+	page_pool_recycle_direct(rx->page_pool, page);
+}
+
 static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
 				    struct lan966x_rx_dcb *dcb,
 				    u64 nextptr)
@@ -73,6 +65,41 @@ static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
 	rx->last_entry = dcb;
 }
 
+static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	struct page_pool_params pp_params = {
+		.order = rx->page_order,
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = FDMA_DCB_MAX,
+		.nid = NUMA_NO_NODE,
+		.dev = lan966x->dev,
+		.dma_dir = DMA_FROM_DEVICE,
+		.offset = XDP_PACKET_HEADROOM,
+		.max_len = rx->max_mtu -
+			   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+	};
+
+	if (lan966x_xdp_present(lan966x))
+		pp_params.dma_dir = DMA_BIDIRECTIONAL;
+
+	rx->page_pool = page_pool_create(&pp_params);
+
+	for (int i = 0; i < lan966x->num_phys_ports; ++i) {
+		struct lan966x_port *port;
+
+		if (!lan966x->ports[i])
+			continue;
+
+		port = lan966x->ports[i];
+		xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
+		xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
+					   rx->page_pool);
+	}
+
+	return PTR_ERR_OR_ZERO(rx->page_pool);
+}
+
 static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
 {
 	struct lan966x *lan966x = rx->lan966x;
@@ -82,6 +109,9 @@ static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
 	int i, j;
 	int size;
 
+	if (lan966x_fdma_rx_alloc_page_pool(rx))
+		return PTR_ERR(rx->page_pool);
+
 	/* calculate how many pages are needed to allocate the dcbs */
 	size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
 	size = ALIGN(size, PAGE_SIZE);
@@ -116,6 +146,12 @@ static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
 	return 0;
 }
 
+static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
+{
+	rx->dcb_index++;
+	rx->dcb_index &= FDMA_DCB_MAX - 1;
+}
+
 static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
 {
 	struct lan966x *lan966x = rx->lan966x;
@@ -355,11 +391,14 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 {
 	struct lan966x_tx *tx = &lan966x->tx;
 	struct lan966x_tx_dcb_buf *dcb_buf;
+	struct xdp_frame_bulk bq;
 	struct lan966x_db *db;
 	unsigned long flags;
 	bool clear = false;
 	int i;
 
+	xdp_frame_bulk_init(&bq);
+
 	spin_lock_irqsave(&lan966x->tx_lock, flags);
 	for (i = 0; i < FDMA_DCB_MAX; ++i) {
 		dcb_buf = &tx->dcbs_buf[i];
@@ -372,19 +411,35 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
 			continue;
 
 		dcb_buf->dev->stats.tx_packets++;
-		dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
+		dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
 
 		dcb_buf->used = false;
-		dma_unmap_single(lan966x->dev,
-				 dcb_buf->dma_addr,
-				 dcb_buf->skb->len,
-				 DMA_TO_DEVICE);
-		if (!dcb_buf->ptp)
-			dev_kfree_skb_any(dcb_buf->skb);
+		if (dcb_buf->use_skb) {
+			dma_unmap_single(lan966x->dev,
+					 dcb_buf->dma_addr,
+					 dcb_buf->len,
+					 DMA_TO_DEVICE);
+
+			if (!dcb_buf->ptp)
+				napi_consume_skb(dcb_buf->data.skb, weight);
+		} else {
+			if (dcb_buf->xdp_ndo)
+				dma_unmap_single(lan966x->dev,
+						 dcb_buf->dma_addr,
+						 dcb_buf->len,
+						 DMA_TO_DEVICE);
+
+			if (dcb_buf->xdp_ndo)
+				xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
+			else
+				xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
+		}
 
 		clear = true;
 	}
 
+	xdp_flush_frame_bulk(&bq);
+
 	if (clear)
 		lan966x_fdma_wakeup_netdev(lan966x);
 
@@ -403,40 +458,61 @@ static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
 	return true;
 }
 
-static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
+static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
 {
 	struct lan966x *lan966x = rx->lan966x;
-	u64 src_port, timestamp;
+	struct lan966x_port *port;
 	struct lan966x_db *db;
-	struct sk_buff *skb;
 	struct page *page;
 
-	/* Get the received frame and unmap it */
 	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
 	page = rx->page[rx->dcb_index][rx->db_index];
+	if (unlikely(!page))
+		return FDMA_ERROR;
 
-	dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
+	dma_sync_single_for_cpu(lan966x->dev,
+				(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
 				FDMA_DCB_STATUS_BLOCKL(db->status),
 				DMA_FROM_DEVICE);
 
+	lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
+				 src_port);
+	if (WARN_ON(*src_port >= lan966x->num_phys_ports))
+		return FDMA_ERROR;
+
+	port = lan966x->ports[*src_port];
+	if (!lan966x_xdp_port_present(port))
+		return FDMA_PASS;
+
+	return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
+}
+
+static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
+						 u64 src_port)
+{
+	struct lan966x *lan966x = rx->lan966x;
+	struct lan966x_db *db;
+	struct sk_buff *skb;
+	struct page *page;
+	u64 timestamp;
+
+	/* Get the received frame and unmap it */
+	db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
+	page = rx->page[rx->dcb_index][rx->db_index];
+
 	skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
 	if (unlikely(!skb))
-		goto unmap_page;
+		goto free_page;
+
+	skb_mark_for_recycle(skb);
 
+	skb_reserve(skb, XDP_PACKET_HEADROOM);
 	skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
 
-	lan966x_ifh_get_src_port(skb->data, &src_port);
 	lan966x_ifh_get_timestamp(skb->data, &timestamp);
 
-	if (WARN_ON(src_port >= lan966x->num_phys_ports))
-		goto free_skb;
-
-	dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
-			       PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
-			       DMA_ATTR_SKIP_CPU_SYNC);
-
 	skb->dev = lan966x->ports[src_port]->dev;
-	skb_pull(skb, IFH_LEN * sizeof(u32));
+	skb_pull(skb, IFH_LEN_BYTES);
 
 	if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
 		skb_trim(skb, skb->len - ETH_FCS_LEN);
@@ -457,13 +533,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
 
 	return skb;
 
-free_skb:
-	kfree_skb(skb);
-unmap_page:
-	dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr,
-			       PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE,
-			       DMA_ATTR_SKIP_CPU_SYNC);
-	__free_pages(page, rx->page_order);
+free_page:
+	page_pool_recycle_direct(rx->page_pool, page);
 
 	return NULL;
 }
@@ -475,9 +546,11 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 	int dcb_reload = rx->dcb_index;
 	struct lan966x_rx_dcb *old_dcb;
 	struct lan966x_db *db;
+	bool redirect = false;
 	struct sk_buff *skb;
 	struct page *page;
 	int counter = 0;
+	u64 src_port;
 	u64 nextptr;
 
 	lan966x_fdma_tx_clear_buf(lan966x, weight);
@@ -487,19 +560,36 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 		if (!lan966x_fdma_rx_more_frames(rx))
 			break;
 
-		skb = lan966x_fdma_rx_get_frame(rx);
+		counter++;
 
-		rx->page[rx->dcb_index][rx->db_index] = NULL;
-		rx->dcb_index++;
-		rx->dcb_index &= FDMA_DCB_MAX - 1;
+		switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
+		case FDMA_PASS:
+			break;
+		case FDMA_ERROR:
+			lan966x_fdma_rx_free_page(rx);
+			lan966x_fdma_rx_advance_dcb(rx);
+			goto allocate_new;
+		case FDMA_REDIRECT:
+			redirect = true;
+			fallthrough;
+		case FDMA_TX:
+			lan966x_fdma_rx_advance_dcb(rx);
+			continue;
+		case FDMA_DROP:
+			lan966x_fdma_rx_free_page(rx);
+			lan966x_fdma_rx_advance_dcb(rx);
+			continue;
+		}
 
+		skb = lan966x_fdma_rx_get_frame(rx, src_port);
+		lan966x_fdma_rx_advance_dcb(rx);
 		if (!skb)
-			break;
+			goto allocate_new;
 
 		napi_gro_receive(&lan966x->napi, skb);
-		counter++;
 	}
 
+allocate_new:
 	/* Allocate new pages and map them */
 	while (dcb_reload != rx->dcb_index) {
 		db = &rx->dcbs[dcb_reload].db[rx->db_index];
@@ -521,6 +611,9 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
 	if (counter < weight && napi_complete_done(napi, counter))
 		lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
 
+	if (redirect)
+		xdp_do_flush();
+
 	return counter;
 }
 
@@ -565,14 +658,139 @@ static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
 	return -1;
 }
 
+static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
+				      int next_to_use, int len,
+				      dma_addr_t dma_addr)
+{
+	struct lan966x_tx_dcb *next_dcb;
+	struct lan966x_db *next_db;
+
+	next_dcb = &tx->dcbs[next_to_use];
+	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
+
+	next_db = &next_dcb->db[0];
+	next_db->dataptr = dma_addr;
+	next_db->status = FDMA_DCB_STATUS_SOF |
+			  FDMA_DCB_STATUS_EOF |
+			  FDMA_DCB_STATUS_INTR |
+			  FDMA_DCB_STATUS_BLOCKO(0) |
+			  FDMA_DCB_STATUS_BLOCKL(len);
+}
+
+static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
+{
+	struct lan966x *lan966x = tx->lan966x;
+	struct lan966x_tx_dcb *dcb;
+
+	if (likely(lan966x->tx.activated)) {
+		/* Connect current dcb to the next db */
+		dcb = &tx->dcbs[tx->last_in_use];
+		dcb->nextptr = tx->dma + (next_to_use *
+					  sizeof(struct lan966x_tx_dcb));
+
+		lan966x_fdma_tx_reload(tx);
+	} else {
+		/* Because it is first time, then just activate */
+		lan966x->tx.activated = true;
+		lan966x_fdma_tx_activate(tx);
+	}
+
+	/* Move to next dcb because this last in use */
+	tx->last_in_use = next_to_use;
+}
+
+int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
+			   struct xdp_frame *xdpf,
+			   struct page *page,
+			   bool dma_map)
+{
+	struct lan966x *lan966x = port->lan966x;
+	struct lan966x_tx_dcb_buf *next_dcb_buf;
+	struct lan966x_tx *tx = &lan966x->tx;
+	dma_addr_t dma_addr;
+	int next_to_use;
+	__be32 *ifh;
+	int ret = 0;
+
+	spin_lock(&lan966x->tx_lock);
+
+	/* Get next index */
+	next_to_use = lan966x_fdma_get_next_dcb(tx);
+	if (next_to_use < 0) {
+		netif_stop_queue(port->dev);
+		ret = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	/* Generate new IFH */
+	if (dma_map) {
+		if (xdpf->headroom < IFH_LEN_BYTES) {
+			ret = NETDEV_TX_OK;
+			goto out;
+		}
+
+		ifh = xdpf->data - IFH_LEN_BYTES;
+		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+		lan966x_ifh_set_bypass(ifh, 1);
+		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+		dma_addr = dma_map_single(lan966x->dev,
+					  xdpf->data - IFH_LEN_BYTES,
+					  xdpf->len + IFH_LEN_BYTES,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(lan966x->dev, dma_addr)) {
+			ret = NETDEV_TX_OK;
+			goto out;
+		}
+
+		/* Setup next dcb */
+		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+					  xdpf->len + IFH_LEN_BYTES,
+					  dma_addr);
+	} else {
+		ifh = page_address(page) + XDP_PACKET_HEADROOM;
+		memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
+		lan966x_ifh_set_bypass(ifh, 1);
+		lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
+
+		dma_addr = page_pool_get_dma_addr(page);
+		dma_sync_single_for_device(lan966x->dev,
+					   dma_addr + XDP_PACKET_HEADROOM,
+					   xdpf->len + IFH_LEN_BYTES,
+					   DMA_TO_DEVICE);
+
+		/* Setup next dcb */
+		lan966x_fdma_tx_setup_dcb(tx, next_to_use,
+					  xdpf->len + IFH_LEN_BYTES,
+					  dma_addr + XDP_PACKET_HEADROOM);
+	}
+
+	/* Fill up the buffer */
+	next_dcb_buf = &tx->dcbs_buf[next_to_use];
+	next_dcb_buf->use_skb = false;
+	next_dcb_buf->data.xdpf = xdpf;
+	next_dcb_buf->xdp_ndo = dma_map;
+	next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
+	next_dcb_buf->dma_addr = dma_addr;
+	next_dcb_buf->used = true;
+	next_dcb_buf->ptp = false;
+	next_dcb_buf->dev = port->dev;
+
+	/* Start the transmission */
+	lan966x_fdma_tx_start(tx, next_to_use);
+
+out:
+	spin_unlock(&lan966x->tx_lock);
+
+	return ret;
+}
+
 int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 {
 	struct lan966x_port *port = netdev_priv(dev);
 	struct lan966x *lan966x = port->lan966x;
 	struct lan966x_tx_dcb_buf *next_dcb_buf;
-	struct lan966x_tx_dcb *next_dcb, *dcb;
 	struct lan966x_tx *tx = &lan966x->tx;
-	struct lan966x_db *next_db;
 	int needed_headroom;
 	int needed_tailroom;
 	dma_addr_t dma_addr;
@@ -592,7 +810,7 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 	}
 
 	/* skb processing */
-	needed_headroom = max_t(int, IFH_LEN * sizeof(u32) - skb_headroom(skb), 0);
+	needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
 	needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
 	if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
 		err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
@@ -605,8 +823,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 	}
 
 	skb_tx_timestamp(skb);
-	skb_push(skb, IFH_LEN * sizeof(u32));
-	memcpy(skb->data, ifh, IFH_LEN * sizeof(u32));
+	skb_push(skb, IFH_LEN_BYTES);
+	memcpy(skb->data, ifh, IFH_LEN_BYTES);
 	skb_put(skb, 4);
 
 	dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
@@ -618,20 +836,14 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 	}
 
 	/* Setup next dcb */
-	next_dcb = &tx->dcbs[next_to_use];
-	next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
-
-	next_db = &next_dcb->db[0];
-	next_db->dataptr = dma_addr;
-	next_db->status = FDMA_DCB_STATUS_SOF |
-			  FDMA_DCB_STATUS_EOF |
-			  FDMA_DCB_STATUS_INTR |
-			  FDMA_DCB_STATUS_BLOCKO(0) |
-			  FDMA_DCB_STATUS_BLOCKL(skb->len);
+	lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
 
 	/* Fill up the buffer */
 	next_dcb_buf = &tx->dcbs_buf[next_to_use];
-	next_dcb_buf->skb = skb;
+	next_dcb_buf->use_skb = true;
+	next_dcb_buf->data.skb = skb;
+	next_dcb_buf->xdp_ndo = false;
+	next_dcb_buf->len = skb->len;
 	next_dcb_buf->dma_addr = dma_addr;
 	next_dcb_buf->used = true;
 	next_dcb_buf->ptp = false;
@@ -641,21 +853,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
 	    LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
 		next_dcb_buf->ptp = true;
 
-	if (likely(lan966x->tx.activated)) {
-		/* Connect current dcb to the next db */
-		dcb = &tx->dcbs[tx->last_in_use];
-		dcb->nextptr = tx->dma + (next_to_use *
-					  sizeof(struct lan966x_tx_dcb));
-
-		lan966x_fdma_tx_reload(tx);
-	} else {
-		/* Because it is first time, then just activate */
-		lan966x->tx.activated = true;
-		lan966x_fdma_tx_activate(tx);
-	}
-
-	/* Move to next dcb because this last in use */
-	tx->last_in_use = next_to_use;
+	/* Start the transmission */
+	lan966x_fdma_tx_start(tx, next_to_use);
 
 	return NETDEV_TX_OK;
 
@@ -696,6 +895,7 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x)
 
 static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 {
+	struct page_pool *page_pool;
 	dma_addr_t rx_dma;
 	void *rx_dcbs;
 	u32 size;
@@ -704,6 +904,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 	/* Store these for later to free them */
 	rx_dma = lan966x->rx.dma;
 	rx_dcbs = lan966x->rx.dcbs;
+	page_pool = lan966x->rx.page_pool;
 
 	napi_synchronize(&lan966x->napi);
 	napi_disable(&lan966x->napi);
@@ -712,6 +913,7 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 	lan966x_fdma_rx_disable(&lan966x->rx);
 	lan966x_fdma_rx_free_pages(&lan966x->rx);
 	lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
+	lan966x->rx.max_mtu = new_mtu;
 	err = lan966x_fdma_rx_alloc(&lan966x->rx);
 	if (err)
 		goto restore;
@@ -721,11 +923,14 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
 	size = ALIGN(size, PAGE_SIZE);
 	dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);
 
+	page_pool_destroy(page_pool);
+
 	lan966x_fdma_wakeup_netdev(lan966x);
 	napi_enable(&lan966x->napi);
 
 	return err;
 restore:
+	lan966x->rx.page_pool = page_pool;
 	lan966x->rx.dma = rx_dma;
 	lan966x->rx.dcbs = rx_dcbs;
 	lan966x_fdma_rx_start(&lan966x->rx);
@@ -733,21 +938,20 @@ restore:
 	return err;
 }
 
-int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
+{
+	return lan966x_fdma_get_max_mtu(lan966x) +
+	       IFH_LEN_BYTES +
+	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+	       VLAN_HLEN * 2 +
+	       XDP_PACKET_HEADROOM;
+}
+
+static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
 {
-	int max_mtu;
 	int err;
 	u32 val;
 
-	max_mtu = lan966x_fdma_get_max_mtu(lan966x);
-	max_mtu += IFH_LEN * sizeof(u32);
-	max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-	max_mtu += VLAN_HLEN * 2;
-
-	if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 ==
-	    lan966x->rx.page_order)
-		return 0;
-
 	/* Disable the CPU port */
 	lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
 		QSYS_SW_PORT_MODE_PORT_ENA,
@@ -773,6 +977,25 @@ int lan966x_fdma_change_mtu(struct lan966x *lan966x)
 	return err;
 }
 
+int lan966x_fdma_change_mtu(struct lan966x *lan966x)
+{
+	int max_mtu;
+
+	max_mtu = lan966x_fdma_get_max_frame(lan966x);
+	if (max_mtu == lan966x->rx.max_mtu)
+		return 0;
+
+	return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
+int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
+{
+	int max_mtu;
+
+	max_mtu = lan966x_fdma_get_max_frame(lan966x);
+	return __lan966x_fdma_reload(lan966x, max_mtu);
+}
+
 void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
 {
 	if (lan966x->fdma_ndev)
@@ -800,6 +1023,7 @@ int lan966x_fdma_init(struct lan966x *lan966x)
 
 	lan966x->rx.lan966x = lan966x;
 	lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
+	lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
 	lan966x->tx.lan966x = lan966x;
 	lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
 	lan966x->tx.last_in_use = -1;
@@ -832,5 +1056,6 @@ void lan966x_fdma_deinit(struct lan966x *lan966x)
 	lan966x_fdma_rx_free_pages(&lan966x->rx);
 	lan966x_fdma_rx_free(&lan966x->rx);
+	page_pool_destroy(lan966x->rx.page_pool);
 	lan966x_fdma_tx_free(&lan966x->tx);
 }