path: root/drivers/net/ethernet/broadcom/bgmac.c
author  Felix Fietkau <nbd@openwrt.org>  2015-03-23 14:35:36 +0300
committer  David S. Miller <davem@davemloft.net>  2015-03-23 23:56:28 +0300
commit  45c9b3c09490ef4a4ac19df75b5919849423f3e7 (patch)
tree  77418d96c3b36a0ef4a8cfd6b08f5e3e941652bd /drivers/net/ethernet/broadcom/bgmac.c
parent  0addb83d4c71a68fa716fbd69a605ac0ba5e2b50 (diff)
download  linux-45c9b3c09490ef4a4ac19df75b5919849423f3e7.tar.xz
bgmac: implement GRO and use build_skb
This improves performance for routing and local rx

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/broadcom/bgmac.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c  80
1 file changed, 56 insertions(+), 24 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index efda7cf82394..c7da37aef035 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -276,31 +276,31 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
struct bgmac_slot_info *slot)
{
struct device *dma_dev = bgmac->core->dma_dev;
- struct sk_buff *skb;
dma_addr_t dma_addr;
struct bgmac_rx_header *rx;
+ void *buf;
/* Alloc skb */
- skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
- if (!skb)
+ buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
+ if (!buf)
return -ENOMEM;
/* Poison - if everything goes fine, hardware will overwrite it */
- rx = (struct bgmac_rx_header *)skb->data;
+ rx = buf;
rx->len = cpu_to_le16(0xdead);
rx->flags = cpu_to_le16(0xbeef);
/* Map skb for the DMA */
- dma_addr = dma_map_single(dma_dev, skb->data,
- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(dma_dev, buf, BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
bgmac_err(bgmac, "DMA mapping error\n");
- dev_kfree_skb(skb);
+ put_page(virt_to_head_page(buf));
return -ENOMEM;
}
/* Update the slot */
- slot->skb = skb;
+ slot->buf = buf;
slot->dma_addr = dma_addr;
return 0;
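
The hunk above switches the RX refill path from allocating a full skb per slot to grabbing a bare page fragment with netdev_alloc_frag(); an skb is only built later, once the hardware has actually filled the buffer. Below is a minimal kernel-style sketch of that refill pattern. The helper name my_rx_refill is hypothetical and error handling beyond the two failure points is trimmed, but the calls mirror the hunk. BGMAC_RX_ALLOC_SIZE is defined in bgmac.h (not part of this file's diff) and, for build_skb() to work later, must leave tailroom for struct skb_shared_info at the end of the fragment.

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include "bgmac.h"

/* Hypothetical refill helper mirroring bgmac_dma_rx_skb_for_slot() above. */
static int my_rx_refill(struct device *dma_dev, struct bgmac_slot_info *slot)
{
	dma_addr_t dma_addr;
	void *buf;

	/* Page fragment instead of a full skb; the skb is built on completion */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	dma_addr = dma_map_single(dma_dev, buf, BGMAC_RX_BUF_SIZE,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		/* Fragments are released through their backing page */
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	slot->buf = buf;
	slot->dma_addr = dma_addr;
	return 0;
}
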
@@ -343,8 +343,9 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
while (ring->start != ring->end) {
struct device *dma_dev = bgmac->core->dma_dev;
struct bgmac_slot_info *slot = &ring->slots[ring->start];
- struct sk_buff *skb = slot->skb;
- struct bgmac_rx_header *rx;
+ struct bgmac_rx_header *rx = slot->buf;
+ struct sk_buff *skb;
+ void *buf = slot->buf;
u16 len, flags;
/* Unmap buffer to make it accessible to the CPU */
@@ -352,7 +353,6 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
/* Get info from the header */
- rx = (struct bgmac_rx_header *)skb->data;
len = le16_to_cpu(rx->len);
flags = le16_to_cpu(rx->flags);
@@ -393,12 +393,13 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
dma_unmap_single(dma_dev, old_dma_addr,
BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+ skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bgmac->net_dev);
- netif_receive_skb(skb);
+ napi_gro_receive(&bgmac->napi, skb);
handled++;
} while (0);
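
On the completion side, the hunk above builds the skb only after the hardware has written the frame: dma_unmap_single() hands the fragment back to the CPU, build_skb() wraps it without copying, skb_put()/skb_pull() strip the hardware RX header, and napi_gro_receive() replaces netif_receive_skb() so frames can be aggregated by GRO. A hedged sketch of that path follows (continuing the sketch above); the helper my_rx_complete is hypothetical, the slot refill and the poisoned/oversized-length checks from the driver are omitted, and the build_skb() NULL check is a defensive addition not present in the hunk itself.

/* Hypothetical completion helper mirroring the hunk above. */
static void my_rx_complete(struct bgmac *bgmac, struct bgmac_slot_info *slot,
			   u16 len)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	void *buf = slot->buf;
	struct sk_buff *skb;

	/* Make the DMA'd fragment visible to the CPU */
	dma_unmap_single(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
			 DMA_FROM_DEVICE);

	/* Wrap the already-filled fragment in an skb without copying */
	skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
	if (unlikely(!skb)) {			/* defensive; not in the hunk */
		put_page(virt_to_head_page(buf));
		return;
	}

	skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
	skb_pull(skb, BGMAC_RX_FRAME_OFFSET);	/* strip the hardware RX header */

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, bgmac->net_dev);
	napi_gro_receive(&bgmac->napi, skb);	/* GRO instead of netif_receive_skb() */
}
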
@@ -434,12 +435,11 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac,
return false;
}
-static void bgmac_dma_ring_free(struct bgmac *bgmac,
- struct bgmac_dma_ring *ring)
+static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
{
struct device *dma_dev = bgmac->core->dma_dev;
struct bgmac_slot_info *slot;
- int size;
int i;
for (i = 0; i < ring->num_slots; i++) {
@@ -451,23 +451,55 @@ static void bgmac_dma_ring_free(struct bgmac *bgmac,
dev_kfree_skb(slot->skb);
}
}
+}
- if (ring->cpu_base) {
- /* Free ring of descriptors */
- size = ring->num_slots * sizeof(struct bgmac_dma_desc);
- dma_free_coherent(dma_dev, size, ring->cpu_base,
- ring->dma_base);
+static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot;
+ int i;
+
+ for (i = 0; i < ring->num_slots; i++) {
+ slot = &ring->slots[i];
+ if (!slot->buf)
+ continue;
+
+ if (slot->dma_addr)
+ dma_unmap_single(dma_dev, slot->dma_addr,
+ BGMAC_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ put_page(virt_to_head_page(slot->buf));
}
}
+static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
+ struct bgmac_dma_ring *ring)
+{
+ struct device *dma_dev = bgmac->core->dma_dev;
+ int size;
+
+ if (!ring->cpu_base)
+ return;
+
+ /* Free ring of descriptors */
+ size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+ dma_free_coherent(dma_dev, size, ring->cpu_base,
+ ring->dma_base);
+}
+
static void bgmac_dma_free(struct bgmac *bgmac)
{
int i;
- for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
- bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
- for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
- bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
+ for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+ bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
+ bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i]);
+ }
+ for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+ bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
+ bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i]);
+ }
}
static int bgmac_dma_alloc(struct bgmac *bgmac)
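
The last hunks split the old bgmac_dma_ring_free() into three helpers because TX and RX slots no longer hold the same kind of buffer: TX slots still carry skbs handed down by the stack and are freed with dev_kfree_skb(), while RX slots now hold raw page fragments released via put_page(virt_to_head_page()); the descriptor ring itself is freed separately with dma_free_coherent(). A minimal sketch of the two slot-teardown cases, with hypothetical helper names and trimmed bodies:

/* Hypothetical per-slot teardown helpers illustrating the split above. */
static void my_free_tx_slot(struct device *dma_dev, struct bgmac_slot_info *slot)
{
	if (!slot->skb)
		return;
	if (slot->dma_addr)
		dma_unmap_single(dma_dev, slot->dma_addr, slot->skb->len,
				 DMA_TO_DEVICE);
	dev_kfree_skb(slot->skb);		/* TX buffers are still skbs */
}

static void my_free_rx_slot(struct device *dma_dev, struct bgmac_slot_info *slot)
{
	if (!slot->buf)
		return;
	if (slot->dma_addr)
		dma_unmap_single(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
	put_page(virt_to_head_page(slot->buf));	/* RX buffers are page fragments */
}
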