Diffstat (limited to 'drivers/net/ethernet/marvell/mvneta.c')
 drivers/net/ethernet/marvell/mvneta.c | 70
 1 file changed, 31 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index dfcb1767acbb..54b0bf574c05 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -330,7 +330,6 @@
#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
-#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
#define IS_TSO_HEADER(txq, addr) \
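The MVNETA_SKB_SIZE() helper becomes dead code once the rx path (see the
mvneta_swbm_rx_frame hunk below) compares against MVNETA_MAX_RX_BUF_SIZE
directly. The two checks are equivalent, assuming PAGE_SIZE and
MVNETA_SKB_PAD are both multiples of SMP_CACHE_BYTES (SKB_DATA_ALIGN
guarantees this for the latter):

    MVNETA_SKB_SIZE(len) > PAGE_SIZE
      <=>  SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD > PAGE_SIZE
      <=>  SKB_DATA_ALIGN(len) > PAGE_SIZE - MVNETA_SKB_PAD
      <=>  len > MVNETA_MAX_RX_BUF_SIZE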
@@ -752,13 +751,12 @@ static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
int i;
- u32 dummy;
/* Perform dummy reads from MIB counters */
for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
- dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
- dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
- dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
+ mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
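The dummy variable was never read; these MIB counters are clear-on-read,
so the mvreg_read() itself is the desired side effect and its return value
can simply be dropped. A minimal sketch of the same pattern, assuming a
readl()-backed register window (names here are illustrative, not the
driver's):

    #include <linux/io.h>

    /* Drain a block of clear-on-read counters: each readl() resets the
     * counter as a side effect, so no variable is needed for the result.
     */
    static void drain_cor_counters(void __iomem *base, unsigned int nwords)
    {
            unsigned int i;

            for (i = 0; i < nwords; i++)
                    readl(base + i * 4); /* value intentionally discarded */
    }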
/* Get System Network Statistics */
@@ -1833,7 +1831,7 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
struct mvneta_tx_queue *txq, int num,
- struct netdev_queue *nq)
+ struct netdev_queue *nq, bool napi)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
int i;
@@ -1856,7 +1854,10 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
dev_kfree_skb_any(buf->skb);
} else if (buf->type == MVNETA_TYPE_XDP_TX ||
buf->type == MVNETA_TYPE_XDP_NDO) {
- xdp_return_frame(buf->xdpf);
+ if (napi && buf->type == MVNETA_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(buf->xdpf);
+ else
+ xdp_return_frame(buf->xdpf);
}
}
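The new napi flag lets completion processing pick the cheaper return path:
xdp_return_frame_rx_napi() may recycle the frame's pages into the
page_pool's lockless per-CPU cache, which is only safe from softirq/NAPI
context, and only for frames this port itself transmitted (XDP_TX). Frames
handed over through ndo_xdp_xmit (XDP_NDO), and the forced-teardown path,
keep using the context-agnostic xdp_return_frame(). A hedged sketch of the
dispatch (helper name is illustrative):

    #include <net/xdp.h>

    static void sketch_return_frame(struct xdp_frame *xdpf,
                                    bool napi, bool xdp_tx)
    {
            if (napi && xdp_tx)
                    xdp_return_frame_rx_napi(xdpf); /* lockless recycle */
            else
                    xdp_return_frame(xdpf);         /* any context */
    }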
@@ -1874,7 +1875,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
if (!tx_done)
return;
- mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
txq->count -= tx_done;
@@ -2029,11 +2030,11 @@ mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i;
- page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
- sync_len, napi);
for (i = 0; i < sinfo->nr_frags; i++)
page_pool_put_full_page(rxq->page_pool,
skb_frag_page(&sinfo->frags[i]), napi);
+ page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
+ sync_len, napi);
}
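The reordering matters because the skb_shared_info that tracks the
fragments lives in the tail of the head buffer: returning the head page
first would recycle the very memory the fragment walk is about to read.
A sketch of the corrected ordering, assuming the usual xdp_buff layout:

    #include <linux/mm.h>
    #include <net/page_pool.h>
    #include <net/xdp.h>

    /* Head buffer: [ headroom | data ... | skb_shared_info { frags[] } ]
     * so the head page must be the last one returned to the pool.
     */
    static void sketch_put_buff(struct page_pool *pool, struct xdp_buff *xdp,
                                int sync_len, bool napi)
    {
            struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
            int i;

            for (i = 0; i < sinfo->nr_frags; i++)  /* frags first */
                    page_pool_put_full_page(pool,
                                            skb_frag_page(&sinfo->frags[i]),
                                            napi);
            page_pool_put_page(pool, virt_to_head_page(xdp->data),
                               sync_len, napi);     /* head page last */
    }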
static int
@@ -2227,8 +2228,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
- struct page *page,
- struct mvneta_stats *stats)
+ struct page *page)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
@@ -2236,19 +2236,22 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
enum dma_data_direction dma_dir;
struct skb_shared_info *sinfo;
- if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
+ if (*size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
data_len += len;
} else {
- len = rx_desc->data_size;
+ len = *size;
data_len += len - ETH_FCS_LEN;
}
+ *size = *size - len;
dma_dir = page_pool_get_dma_dir(rxq->page_pool);
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
+ rx_desc->buf_phys_addr = 0;
+
/* Prefetch header */
prefetch(data);
@@ -2259,9 +2262,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0;
-
- *size = rx_desc->data_size - len;
- rx_desc->buf_phys_addr = 0;
}
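Two changes here: the length check now uses the running *size (bytes of
the frame still to be consumed) instead of re-reading rx_desc->data_size,
saving an extra access to the DMA descriptor, and buf_phys_addr is cleared
as soon as ownership of the buffer passes to the xdp_buff. A toy model of
the *size accounting; the 3520 usable bytes per buffer is an assumption
(it would follow from 4 KiB pages and a 576-byte MVNETA_SKB_PAD), and
MH/FCS trimming is ignored:

    #include <stdio.h>

    #define MAX_RX_BUF_SIZE 3520  /* assumed usable bytes per page buffer */

    int main(void)
    {
            int size = 9000;  /* frame size, read once from the first desc */
            int bufs = 0;

            while (size > 0) {
                    int len = size > MAX_RX_BUF_SIZE ? MAX_RX_BUF_SIZE : size;
                    size -= len;  /* the remainder rides along in *size */
                    bufs++;
            }
            printf("frame spans %d buffers\n", bufs); /* prints 3 */
            return 0;
    }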
static void
@@ -2307,11 +2307,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i, num_frags = sinfo->nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
struct sk_buff *skb;
- memcpy(frags, sinfo->frags, sizeof(skb_frag_t) * num_frags);
-
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -2323,12 +2320,12 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
mvneta_rx_csum(pp, desc_status, skb);
for (i = 0; i < num_frags; i++) {
- struct page *page = skb_frag_page(&frags[i]);
+ skb_frag_t *frag = &sinfo->frags[i];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, skb_frag_off(&frags[i]),
- skb_frag_size(&frags[i]), PAGE_SIZE);
- page_pool_release_page(rxq->page_pool, page);
+ skb_frag_page(frag), skb_frag_off(frag),
+ skb_frag_size(frag), PAGE_SIZE);
+ page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
}
return skb;
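Dropping the on-stack frags[] copy is safe because of how build_skb()
initializes the skb_shared_info it lays over the same buffer tail: the
region is cleared only up to the dataref field, so nr_frags is zeroed but
the frags[] array beyond it survives. That is why num_frags is sampled
before the call. A hedged sketch of the ordering constraint:

    #include <linux/skbuff.h>
    #include <net/xdp.h>

    static struct sk_buff *sketch_build(struct xdp_buff *xdp)
    {
            struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
            int i, num_frags = sinfo->nr_frags; /* sample BEFORE build_skb() */
            struct sk_buff *skb = build_skb(xdp->data_hard_start, PAGE_SIZE);

            if (!skb)
                    return NULL;
            /* sinfo->frags[0..num_frags-1] are still intact here */
            for (i = 0; i < num_frags; i++)
                    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
                                    skb_frag_page(&sinfo->frags[i]),
                                    skb_frag_off(&sinfo->frags[i]),
                                    skb_frag_size(&sinfo->frags[i]),
                                    PAGE_SIZE);
            return skb;
    }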
@@ -2378,13 +2375,17 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
size = rx_desc->data_size;
frame_sz = size - ETH_FCS_LEN;
- desc_status = rx_desc->status;
+ desc_status = rx_status;
mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
- &size, page, &ps);
+ &size, page);
} else {
- if (unlikely(!xdp_buf.data_hard_start))
+ if (unlikely(!xdp_buf.data_hard_start)) {
+ rx_desc->buf_phys_addr = 0;
+ page_pool_put_full_page(rxq->page_pool, page,
+ true);
continue;
+ }
mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
&size, page);
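Two fixes land in the rx loop itself: desc_status reuses the rx_status
value already loaded from the descriptor rather than dereferencing
rx_desc->status a second time, and a fragment descriptor whose frame head
was already dropped (xdp_buf.data_hard_start is NULL) no longer leaks its
page. A sketch of that orphaned-fragment path, with an illustrative helper
name:

    #include <net/page_pool.h>

    /* Recycle a fragment whose frame head was discarded. Zeroing
     * buf_phys_addr keeps the refill logic from treating the stale
     * DMA address as a live mapping.
     */
    static void sketch_drop_orphan_frag(struct page_pool *pool,
                                        struct mvneta_rx_desc *rx_desc,
                                        struct page *page)
    {
            rx_desc->buf_phys_addr = 0;
            page_pool_put_full_page(pool, page, true); /* true: in NAPI */
    }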
@@ -2861,7 +2862,7 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done = txq->count;
- mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
/* reset txq */
txq->count = 0;
@@ -3396,24 +3397,15 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
txq->last_desc = txq->size - 1;
txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
- if (!txq->buf) {
- dma_free_coherent(pp->dev->dev.parent,
- txq->size * MVNETA_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
+ if (!txq->buf)
return -ENOMEM;
- }
/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * TSO_HEADER_SIZE,
&txq->tso_hdrs_phys, GFP_KERNEL);
- if (!txq->tso_hdrs) {
- kfree(txq->buf);
- dma_free_coherent(pp->dev->dev.parent,
- txq->size * MVNETA_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_phys);
+ if (!txq->tso_hdrs)
return -ENOMEM;
- }
/* Setup XPS mapping */
if (txq_number > 1)
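The manual unwinding above is dropped because teardown is owned by the
caller: when queue setup fails, the driver's cleanup path already frees
the descriptors, txq->buf and the TSO headers, so freeing them here as
well risked a double free. A toy of that centralized-cleanup idiom (all
names hypothetical):

    #include <linux/slab.h>

    struct txq_sketch { void *buf; void *tso_hdrs; };

    /* Idempotent teardown, run by the caller on any setup failure. */
    static void sketch_cleanup(struct txq_sketch *q)
    {
            kfree(q->buf);        /* kfree(NULL) is a no-op */
            q->buf = NULL;
            kfree(q->tso_hdrs);
            q->tso_hdrs = NULL;
    }

    /* Init stops at the first failure and frees nothing itself. */
    static int sketch_init(struct txq_sketch *q, size_t n)
    {
            q->buf = kmalloc_array(n, sizeof(long), GFP_KERNEL);
            if (!q->buf)
                    return -ENOMEM;  /* caller runs sketch_cleanup() */
            q->tso_hdrs = kmalloc(n, GFP_KERNEL);
            if (!q->tso_hdrs)
                    return -ENOMEM;  /* partial state: cleanup handles it */
            return 0;
    }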