Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4/sge.c')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c  1044
1 file changed, 826 insertions, 218 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index b3da81e90132..97cda501e7e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -55,6 +55,8 @@
#include "t4fw_api.h"
#include "cxgb4_ptp.h"
#include "cxgb4_uld.h"
+#include "cxgb4_tc_mqprio.h"
+#include "sched.h"
/*
* Rx buffer size. We use largish buffers if possible but settle for single
@@ -269,7 +271,6 @@ out_err:
}
EXPORT_SYMBOL(cxgb4_map_skb);
-#ifdef CONFIG_NEED_DMA_MAP_STATE
static void unmap_skb(struct device *dev, const struct sk_buff *skb,
const dma_addr_t *addr)
{
@@ -284,6 +285,7 @@ static void unmap_skb(struct device *dev, const struct sk_buff *skb,
dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
}
+#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
* deferred_unmap_destructor - unmap a packet when it is freed
* @skb: the packet
@@ -298,65 +300,6 @@ static void deferred_unmap_destructor(struct sk_buff *skb)
}
#endif
-static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
- const struct ulptx_sgl *sgl, const struct sge_txq *q)
-{
- const struct ulptx_sge_pair *p;
- unsigned int nfrags = skb_shinfo(skb)->nr_frags;
-
- if (likely(skb_headlen(skb)))
- dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
- DMA_TO_DEVICE);
- else {
- dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
- DMA_TO_DEVICE);
- nfrags--;
- }
-
- /*
- * the complexity below is because of the possibility of a wrap-around
- * in the middle of an SGL
- */
- for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
- if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
-unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p++;
- } else if ((u8 *)p == (u8 *)q->stat) {
- p = (const struct ulptx_sge_pair *)q->desc;
- goto unmap;
- } else if ((u8 *)p + 8 == (u8 *)q->stat) {
- const __be64 *addr = (const __be64 *)q->desc;
-
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[1]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[2];
- } else {
- const __be64 *addr = (const __be64 *)q->desc;
-
- dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
- ntohl(p->len[0]), DMA_TO_DEVICE);
- dma_unmap_page(dev, be64_to_cpu(addr[0]),
- ntohl(p->len[1]), DMA_TO_DEVICE);
- p = (const struct ulptx_sge_pair *)&addr[1];
- }
- }
- if (nfrags) {
- __be64 addr;
-
- if ((u8 *)p == (u8 *)q->stat)
- p = (const struct ulptx_sge_pair *)q->desc;
- addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
- *(const __be64 *)q->desc;
- dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
- DMA_TO_DEVICE);
- }
-}
-
/**
* free_tx_desc - reclaims Tx descriptors and their buffers
* @adapter: the adapter
@@ -370,15 +313,16 @@ unmap: dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap)
{
- struct tx_sw_desc *d;
unsigned int cidx = q->cidx;
- struct device *dev = adap->pdev_dev;
+ struct tx_sw_desc *d;
d = &q->sdesc[cidx];
while (n--) {
if (d->skb) { /* an SGL is present */
- if (unmap)
- unmap_sgl(dev, d->skb, d->sgl, q);
+ if (unmap && d->addr[0]) {
+ unmap_skb(adap->pdev_dev, d->skb, d->addr);
+ memset(d->addr, 0, sizeof(d->addr));
+ }
dev_consume_skb_any(d->skb);
d->skb = NULL;
}
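
This hunk drops the old unmap_sgl() walk of the hardware SGL: the Tx path now saves the DMA addresses produced by cxgb4_map_skb() in the software descriptor and unmaps from that saved array on completion. A minimal sketch of the store side (the helper name is hypothetical; the real store happens inline in cxgb4_eth_xmit() later in this patch):

static int example_map_for_tx(struct adapter *adap, struct sge_eth_txq *q,
			      struct sk_buff *skb, unsigned int last_desc)
{
	struct tx_sw_desc *d = &q->q.sdesc[last_desc];

	if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr) < 0)) {
		memset(d->addr, 0, sizeof(d->addr));	/* nothing to unmap later */
		return -ENOMEM;
	}
	d->skb = skb;	/* free_tx_desc() will unmap_skb() from d->addr */
	return 0;
}
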
@@ -790,6 +734,8 @@ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
chip_ver > CHELSIO_T5) {
hdrlen = sizeof(struct cpl_tx_tnl_lso);
hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ return 0;
} else {
hdrlen = skb_shinfo(skb)->gso_size ?
sizeof(struct cpl_tx_pkt_lso_core) : 0;
@@ -831,12 +777,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
if (skb_shinfo(skb)->gso_size) {
- if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ if (skb->encapsulation && chip_ver > CHELSIO_T5) {
hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_tnl_lso);
- else
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ u32 pkt_hdrlen;
+
+ pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
+ skb_headlen(skb));
+ hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
+ round_up(pkt_hdrlen, 16);
+ } else {
hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_lso_core);
+ }
hdrlen += sizeof(struct cpl_tx_pkt_core);
flits += (hdrlen / sizeof(__be64));
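
For the new UDP segmentation offload (USO) branch above, the inlined packet headers are rounded up to a 16-byte multiple before being added to the WR length. A standalone illustration of that arithmetic (header sizes are example values):

#include <stdio.h>

#define ROUND_UP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int pkt_hdrlen = 14 + 20 + 8;	/* Ethernet + IPv4 + UDP = 42 */

	/* 42 bytes of headers occupy 48 bytes (6 flits) inside the EO WR */
	printf("inlined header bytes: %u\n", ROUND_UP(pkt_hdrlen, 16));
	return 0;
}
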
@@ -1309,6 +1263,35 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
+static inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
+ struct cpl_tx_pkt_lso_core *lso)
+{
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ int l3hdr_len = skb_network_header_len(skb);
+ const struct skb_shared_info *ssi;
+ bool ipv6 = false;
+
+ ssi = skb_shinfo(skb);
+ if (ssi->gso_type & SKB_GSO_TCPV6)
+ ipv6 = true;
+
+ lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(ipv6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(ssi->gso_size);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(skb->len);
+ else
+ lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
+
+ return (void *)(lso + 1);
+}
+
/**
* t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
* @adap: the adapter
@@ -1347,6 +1330,50 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
return reclaimed;
}
+static inline int cxgb4_validate_skb(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 min_pkt_len)
+{
+ u32 max_pkt_len;
+
+ /* The chip min packet length is 10 octets but some firmware
+ * commands have a minimum packet length requirement. So, play
+ * safe and reject anything shorter than @min_pkt_len.
+ */
+ if (unlikely(skb->len < min_pkt_len))
+ return -EINVAL;
+
+ /* Discard the packet if the length is greater than mtu */
+ max_pkt_len = ETH_HLEN + dev->mtu;
+
+ if (skb_vlan_tagged(skb))
+ max_pkt_len += VLAN_HLEN;
+
+ if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ return -EINVAL;
+
+ return 0;
+}
+
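
cxgb4_validate_skb() centralizes the length checks that were previously open-coded in both transmit paths. A standalone mirror of the bounds it enforces, with illustrative values (not driver code):

#include <stdbool.h>
#include <stdio.h>

#define ETH_HLEN	14
#define VLAN_HLEN	4

static bool frame_len_ok(unsigned int len, unsigned int mtu, bool vlan_tagged,
			 bool gso, unsigned int min_pkt_len)
{
	unsigned int max_pkt_len = ETH_HLEN + mtu + (vlan_tagged ? VLAN_HLEN : 0);

	if (len < min_pkt_len)
		return false;
	/* GSO frames may exceed the MTU; the hardware segments them */
	return gso || len <= max_pkt_len;
}

int main(void)
{
	printf("%d\n", frame_len_ok(1514, 1500, false, false, ETH_HLEN));	/* 1 */
	printf("%d\n", frame_len_ok(1515, 1500, false, false, ETH_HLEN));	/* 0 */
	return 0;
}
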
+static void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len)
+{
+ wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
+ wr->u.udpseg.ethlen = skb_network_offset(skb);
+ wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+ wr->u.udpseg.udplen = sizeof(struct udphdr);
+ wr->u.udpseg.rtplen = 0;
+ wr->u.udpseg.r4 = 0;
+ if (skb_shinfo(skb)->gso_size)
+ wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ else
+ wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
+ wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
+ wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+ return (void *)(wr + 1);
+}
+
/**
* cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1356,41 +1383,25 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
*/
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
- u32 wr_mid, ctrl0, op;
- u64 cntrl, *end, *sgl;
- int qidx, credits;
- unsigned int flits, ndesc;
- struct adapter *adap;
- struct sge_eth_txq *q;
- const struct port_info *pi;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int last_desc, flits, ndesc;
+ u32 wr_mid, ctrl0, op, sgl_off = 0;
+ const struct skb_shared_info *ssi;
+ int len, qidx, credits, ret, left;
+ struct tx_sw_desc *sgl_sdesc;
+ struct fw_eth_tx_eo_wr *eowr;
struct fw_eth_tx_pkt_wr *wr;
struct cpl_tx_pkt_core *cpl;
- const struct skb_shared_info *ssi;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ const struct port_info *pi;
bool immediate = false;
- int len, max_pkt_len;
- bool ptp_enabled = is_ptp_enabled(skb, dev);
+ u64 cntrl, *end, *sgl;
+ struct sge_eth_txq *q;
unsigned int chip_ver;
- enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
-
-#ifdef CONFIG_CHELSIO_T4_FCOE
- int err;
-#endif /* CONFIG_CHELSIO_T4_FCOE */
-
- /*
- * The chip min packet length is 10 octets but play safe and reject
- * anything shorter than an Ethernet header.
- */
- if (unlikely(skb->len < ETH_HLEN)) {
-out_free: dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
+ struct adapter *adap;
- /* Discard the packet if the length is greater than mtu */
- max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tagged(skb))
- max_pkt_len += VLAN_HLEN;
- if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
+ if (ret)
goto out_free;
pi = netdev_priv(dev);
@@ -1421,8 +1432,8 @@ out_free: dev_kfree_skb_any(skb);
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
#ifdef CONFIG_CHELSIO_T4_FCOE
- err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
- if (unlikely(err == -ENOTSUPP)) {
+ ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
+ if (unlikely(ret == -ENOTSUPP)) {
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
goto out_free;
@@ -1450,8 +1461,14 @@ out_free: dev_kfree_skb_any(skb);
if (skb->encapsulation && chip_ver > CHELSIO_T5)
tnl_type = cxgb_encap_offload_supported(skb);
+ last_desc = q->q.pidx + ndesc - 1;
+ if (last_desc >= q->q.size)
+ last_desc -= q->q.size;
+ sgl_sdesc = &q->q.sdesc[last_desc];
+
if (!immediate &&
- unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
@@ -1482,17 +1499,18 @@ out_free: dev_kfree_skb_any(skb);
}
wr = (void *)&q->q.desc[q->q.pidx];
+ eowr = (void *)&q->q.desc[q->q.pidx];
wr->equiq_to_len16 = htonl(wr_mid);
wr->r3 = cpu_to_be64(0);
- end = (u64 *)wr + flits;
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ end = (u64 *)eowr + flits;
+ else
+ end = (u64 *)wr + flits;
len = immediate ? skb->len : 0;
len += sizeof(*cpl);
- if (ssi->gso_size) {
+ if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
- bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
- int l3hdr_len = skb_network_header_len(skb);
- int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
if (tnl_type)
@@ -1519,46 +1537,33 @@ out_free: dev_kfree_skb_any(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL)
cntrl = hwcsum(adap->params.chip, skb);
} else {
- lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->ipid_ofst = htons(0);
- lso->mss = htons(ssi->gso_size);
- lso->seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->len = htonl(skb->len);
- else
- lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
-
- if (CHELSIO_CHIP_VERSION(adap->params.chip)
- <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
-
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ cpl = write_tso_wr(adap, skb, lso);
+ cntrl = hwcsum(adap->params.chip, skb);
}
sgl = (u64 *)(cpl + 1); /* sgl start here */
- if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
- /* If current position is already at the end of the
- * txq, reset the current to point to start of the queue
- * and update the end ptr as well.
- */
- if (sgl == (u64 *)q->q.stat) {
- int left = (u8 *)end - (u8 *)q->q.stat;
-
- end = (void *)q->q.desc + left;
- sgl = (void *)q->q.desc;
- }
- }
q->tso++;
q->tx_cso += ssi->gso_segs;
+ } else if (ssi->gso_size) {
+ u64 *start;
+ u32 hdrlen;
+
+ hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
+ len += hdrlen;
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
+ FW_ETH_TX_EO_WR_IMMDLEN_V(len));
+ cpl = write_eo_udp_wr(skb, eowr, hdrlen);
+ cntrl = hwcsum(adap->params.chip, skb);
+
+ start = (u64 *)(cpl + 1);
+ sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
+ hdrlen);
+ if (unlikely(start > sgl)) {
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ }
+ sgl_off = hdrlen;
+ q->uso++;
+ q->tx_cso += ssi->gso_segs;
} else {
if (ptp_enabled)
op = FW_PTP_TX_PKT_WR;
@@ -1575,6 +1580,16 @@ out_free: dev_kfree_skb_any(skb);
}
}
+ if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
+ /* If current position is already at the end of the
+ * txq, reset the current to point to start of the queue
+ * and update the end ptr as well.
+ */
+ left = (u8 *)end - (u8 *)q->q.stat;
+ end = (void *)q->q.desc + left;
+ sgl = (void *)q->q.desc;
+ }
+
if (skb_vlan_tag_present(skb)) {
q->vlan_ins++;
cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
@@ -1604,16 +1619,10 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_inline_tx_skb(skb, &q->q, sgl);
dev_consume_skb_any(skb);
} else {
- int last_desc;
-
- cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
+ cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
+ sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = q->q.pidx + ndesc - 1;
- if (last_desc >= q->q.size)
- last_desc -= q->q.size;
- q->q.sdesc[last_desc].skb = skb;
- q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
+ sgl_sdesc->skb = skb;
}
txq_advance(&q->q, ndesc);
@@ -1622,6 +1631,10 @@ out_free: dev_kfree_skb_any(skb);
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return NETDEV_TX_OK;
+
+out_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/* Constants ... */
@@ -1707,35 +1720,28 @@ static inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ unsigned int last_desc, flits, ndesc;
const struct skb_shared_info *ssi;
struct fw_eth_tx_pkt_vm_wr *wr;
- int qidx, credits, max_pkt_len;
+ struct tx_sw_desc *sgl_sdesc;
struct cpl_tx_pkt_core *cpl;
const struct port_info *pi;
- unsigned int flits, ndesc;
struct sge_eth_txq *txq;
struct adapter *adapter;
+ int qidx, credits, ret;
+ size_t fw_hdr_copy_len;
u64 cntrl, *end;
u32 wr_mid;
- const size_t fw_hdr_copy_len = sizeof(wr->ethmacdst) +
- sizeof(wr->ethmacsrc) +
- sizeof(wr->ethtype) +
- sizeof(wr->vlantci);
/* The chip minimum packet length is 10 octets but the firmware
* command that we are using requires that we copy the Ethernet header
* (including the VLAN tag) into the header so we reject anything
* smaller than that ...
*/
- if (unlikely(skb->len < fw_hdr_copy_len))
- goto out_free;
-
- /* Discard the packet if the length is greater than mtu */
- max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
- max_pkt_len += VLAN_HLEN;
- if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
+ fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) + sizeof(wr->vlantci);
+ ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
+ if (ret)
goto out_free;
/* Figure out which TX Queue we're going to use. */
@@ -1771,12 +1777,19 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
+ last_desc = txq->q.pidx + ndesc - 1;
+ if (last_desc >= txq->q.size)
+ last_desc -= txq->q.size;
+ sgl_sdesc = &txq->q.sdesc[last_desc];
+
if (!t4vf_is_eth_imm(skb) &&
- unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
+ sgl_sdesc->addr) < 0)) {
/* We need to map the skb into PCI DMA space (because it can't
* be in-lined directly into the Work Request) and the mapping
* operation failed. Record the error and drop the packet.
*/
+ memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
txq->mapping_err++;
goto out_free;
}
@@ -1951,7 +1964,6 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
*/
struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
struct sge_txq *tq = &txq->q;
- int last_desc;
/* If the Work Request header was an exact multiple of our TX
* Descriptor length, then it's possible that the starting SGL
@@ -1965,14 +1977,9 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
((void *)end - (void *)tq->stat));
}
- cxgb4_write_sgl(skb, tq, sgl, end, 0, addr);
+ cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
skb_orphan(skb);
-
- last_desc = tq->pidx + ndesc - 1;
- if (last_desc >= tq->size)
- last_desc -= tq->size;
- tq->sdesc[last_desc].skb = skb;
- tq->sdesc[last_desc].sgl = sgl;
+ sgl_sdesc->skb = skb;
}
/* Advance our internal TX Queue state, tell the hardware about
@@ -1991,34 +1998,473 @@ out_free:
return NETDEV_TX_OK;
}
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of cxgb4_reclaim_completed_tx() that is used
+ * for Tx queues that send only immediate data (presently just
+ * the control queues) and thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
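
The reclaim count is simply the distance from the driver's consumer index to the hardware consumer index published in the queue's status page, with wrap-around. A standalone worked example of that arithmetic (values are made up):

#include <stdio.h>

int main(void)
{
	int q_size = 512;	/* descriptors in the ring */
	int sw_cidx = 500;	/* driver's consumer index */
	int hw_cidx = 3;	/* consumer index reported in the status page */
	int reclaim = hw_cidx - sw_cidx;

	if (reclaim < 0)
		reclaim += q_size;

	printf("descriptors reclaimed: %d\n", reclaim);	/* 15 */
	return 0;
}
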
+static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
+{
+ u32 val = *idx + n;
+
+ if (val >= max)
+ val -= max;
+
+ *idx = val;
+}
+
+void cxgb4_eosw_txq_free_desc(struct adapter *adap,
+ struct sge_eosw_txq *eosw_txq, u32 ndesc)
+{
+ struct tx_sw_desc *d;
+
+ d = &eosw_txq->desc[eosw_txq->last_cidx];
+ while (ndesc--) {
+ if (d->skb) {
+ if (d->addr[0]) {
+ unmap_skb(adap->pdev_dev, d->skb, d->addr);
+ memset(d->addr, 0, sizeof(d->addr));
+ }
+ dev_consume_skb_any(d->skb);
+ d->skb = NULL;
+ }
+ eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
+ eosw_txq->ndesc);
+ d = &eosw_txq->desc[eosw_txq->last_cidx];
+ }
+}
+
+static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
+{
+ eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
+ eosw_txq->inuse += n;
+}
+
+static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb)
+{
+ if (eosw_txq->inuse == eosw_txq->ndesc)
+ return -ENOMEM;
+
+ eosw_txq->desc[eosw_txq->pidx].skb = skb;
+ return 0;
+}
+
+static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
+{
+ return eosw_txq->desc[eosw_txq->last_pidx].skb;
+}
+
+static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
+ struct sk_buff *skb, u32 hdr_len)
+{
+ u8 flits, nsgl = 0;
+ u32 wrlen;
+
+ wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
+ if (skb_shinfo(skb)->gso_size &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+ wrlen += sizeof(struct cpl_tx_pkt_lso_core);
+
+ wrlen += roundup(hdr_len, 16);
+
+ /* Packet headers + WR + CPLs */
+ flits = DIV_ROUND_UP(wrlen, 8);
+
+ if (skb_shinfo(skb)->nr_frags > 0) {
+ if (skb_headlen(skb) - hdr_len)
+ nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+ else
+ nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
+ } else if (skb->len - hdr_len) {
+ nsgl = sgl_len(1);
+ }
+
+ return flits + nsgl;
+}
+
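
Flits are 8-byte units: the WR, CPLs and inlined headers contribute DIV_ROUND_UP(wrlen, 8) flits, and the DMA'd payload contributes sgl_len() flits. A standalone worked example, assuming the sgl_len() formula defined earlier in sge.c is unchanged and using an illustrative WR size:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* sgl_len() as defined earlier in sge.c (assumed unchanged by this patch):
 * flits needed for a ULPTX SGL covering n DMA addresses.
 */
static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

int main(void)
{
	unsigned int wrlen = 72 + 64;	/* EO WR + CPL (illustrative) plus 62-byte headers rounded to 64 */
	unsigned int nsgl = sgl_len(3);	/* linear remainder + 2 page fragments */

	printf("flits: %u\n", DIV_ROUND_UP(wrlen, 8) + nsgl);	/* 17 + 5 = 22 */
	return 0;
}
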
+static inline void *write_eo_wr(struct adapter *adap,
+ struct sge_eosw_txq *eosw_txq,
+ struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
+ u32 hdr_len, u32 wrlen)
+{
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ struct cpl_tx_pkt_core *cpl;
+ u32 immd_len, wrlen16;
+ bool compl = false;
+ u8 ver, proto;
+
+ ver = ip_hdr(skb)->version;
+ proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;
+
+ wrlen16 = DIV_ROUND_UP(wrlen, 16);
+ immd_len = sizeof(struct cpl_tx_pkt_core);
+ if (skb_shinfo(skb)->gso_size &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+ immd_len += sizeof(struct cpl_tx_pkt_lso_core);
+ immd_len += hdr_len;
+
+ if (!eosw_txq->ncompl ||
+ eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) {
+ compl = true;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
+ }
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
+ FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
+ FW_WR_COMPL_V(compl));
+ wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
+ FW_WR_FLOWID_V(eosw_txq->hwtid));
+ wr->r3 = 0;
+ if (proto == IPPROTO_UDP) {
+ cpl = write_eo_udp_wr(skb, wr, hdr_len);
+ } else {
+ wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
+ wr->u.tcpseg.ethlen = skb_network_offset(skb);
+ wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
+ wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
+ wr->u.tcpseg.tsclk_tsoff = 0;
+ wr->u.tcpseg.r4 = 0;
+ wr->u.tcpseg.r5 = 0;
+ wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);
+
+ if (ssi->gso_size) {
+ struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
+
+ wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
+ cpl = write_tso_wr(adap, skb, lso);
+ } else {
+ wr->u.tcpseg.mss = cpu_to_be16(0xffff);
+ cpl = (void *)(wr + 1);
+ }
+ }
+
+ eosw_txq->cred -= wrlen16;
+ eosw_txq->last_compl += wrlen16;
+ return cpl;
+}
+
+static void ethofld_hard_xmit(struct net_device *dev,
+ struct sge_eosw_txq *eosw_txq)
+{
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ u32 wrlen, wrlen16, hdr_len, data_len;
+ enum sge_eosw_state next_state;
+ u64 cntrl, *start, *end, *sgl;
+ struct sge_eohw_txq *eohw_txq;
+ struct cpl_tx_pkt_core *cpl;
+ struct fw_eth_tx_eo_wr *wr;
+ bool skip_eotx_wr = false;
+ struct tx_sw_desc *d;
+ struct sk_buff *skb;
+ u8 flits, ndesc;
+ int left;
+
+ eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
+ spin_lock(&eohw_txq->lock);
+ reclaim_completed_tx_imm(&eohw_txq->q);
+
+ d = &eosw_txq->desc[eosw_txq->last_pidx];
+ skb = d->skb;
+ skb_tx_timestamp(skb);
+
+ wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
+ if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
+ eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
+ hdr_len = skb->len;
+ data_len = 0;
+ flits = DIV_ROUND_UP(hdr_len, 8);
+ if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
+ next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
+ else
+ next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
+ skip_eotx_wr = true;
+ } else {
+ hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
+ data_len = skb->len - hdr_len;
+ flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
+ }
+ ndesc = flits_to_desc(flits);
+ wrlen = flits * 8;
+ wrlen16 = DIV_ROUND_UP(wrlen, 16);
+
+ /* If there are no CPL credits, then wait for credits
+ * to come back and retry again
+ */
+ if (unlikely(wrlen16 > eosw_txq->cred))
+ goto out_unlock;
+
+ if (unlikely(skip_eotx_wr)) {
+ start = (u64 *)wr;
+ eosw_txq->state = next_state;
+ goto write_wr_headers;
+ }
+
+ cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
+ cntrl = hwcsum(adap->params.chip, skb);
+ if (skb_vlan_tag_present(skb))
+ cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
+
+ cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
+ TXPKT_INTF_V(pi->tx_chan) |
+ TXPKT_PF_V(adap->pf));
+ cpl->pack = 0;
+ cpl->len = cpu_to_be16(skb->len);
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ start = (u64 *)(cpl + 1);
+
+write_wr_headers:
+ sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
+ hdr_len);
+ if (data_len) {
+ if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) {
+ memset(d->addr, 0, sizeof(d->addr));
+ eohw_txq->mapping_err++;
+ goto out_unlock;
+ }
+
+ end = (u64 *)wr + flits;
+ if (unlikely(start > sgl)) {
+ left = (u8 *)end - (u8 *)eohw_txq->q.stat;
+ end = (void *)eohw_txq->q.desc + left;
+ }
+
+ if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
+ /* If current position is already at the end of the
+ * txq, reset the current to point to start of the queue
+ * and update the end ptr as well.
+ */
+ left = (u8 *)end - (u8 *)eohw_txq->q.stat;
+
+ end = (void *)eohw_txq->q.desc + left;
+ sgl = (void *)eohw_txq->q.desc;
+ }
+
+ cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
+ d->addr);
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ eohw_txq->uso++;
+ else
+ eohw_txq->tso++;
+ eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ eohw_txq->tx_cso++;
+ }
+
+ if (skb_vlan_tag_present(skb))
+ eohw_txq->vlan_ins++;
+
+ txq_advance(&eohw_txq->q, ndesc);
+ cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
+ eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);
+
+out_unlock:
+ spin_unlock(&eohw_txq->lock);
+}
+
+static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
+{
+ struct sk_buff *skb;
+ int pktcount;
+
+ switch (eosw_txq->state) {
+ case CXGB4_EO_STATE_ACTIVE:
+ case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
+ case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
+ pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+ break;
+ case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
+ case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
+ case CXGB4_EO_STATE_CLOSED:
+ default:
+ return;
+ }
+
+ while (pktcount--) {
+ skb = eosw_txq_peek(eosw_txq);
+ if (!skb) {
+ eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
+ eosw_txq->ndesc);
+ continue;
+ }
+
+ ethofld_hard_xmit(dev, eosw_txq);
+ }
+}
+
+static netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ struct sge_eosw_txq *eosw_txq;
+ u32 qid;
+ int ret;
+
+ ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
+ if (ret)
+ goto out_free;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
+ qid = skb_get_queue_mapping(skb) - pi->nqsets;
+ eosw_txq = &tc_port_mqprio->eosw_txq[qid];
+ spin_lock_bh(&eosw_txq->lock);
+ if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+ goto out_unlock;
+
+ ret = eosw_txq_enqueue(eosw_txq, skb);
+ if (ret)
+ goto out_unlock;
+
+ /* SKB is queued for processing until credits are available.
+ * So, call the destructor now and we'll free the skb later
+ * after it has been successfully transmitted.
+ */
+ skb_orphan(skb);
+
+ eosw_txq_advance(eosw_txq, 1);
+ ethofld_xmit(dev, eosw_txq);
+ spin_unlock_bh(&eosw_txq->lock);
+ return NETDEV_TX_OK;
+
+out_unlock:
+ spin_unlock_bh(&eosw_txq->lock);
+out_free:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
+ u16 qid = skb_get_queue_mapping(skb);
if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
return cxgb4_vf_eth_xmit(skb, dev);
+ if (unlikely(qid >= pi->nqsets))
+ return cxgb4_ethofld_xmit(skb, dev);
+
return cxgb4_eth_xmit(skb, dev);
}
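
With TC-MQPRIO offload the netdev exposes extra Tx queues beyond the port's nqsets; those map onto ETHOFLD software Tx queues, and cxgb4_ethofld_xmit() indexes them by subtracting nqsets. A standalone illustration of that mapping (numbers are examples):

#include <stdio.h>

int main(void)
{
	unsigned int nqsets = 8;	/* regular Ethernet Tx queues on the port */
	unsigned int txq_map = 10;	/* skb_get_queue_mapping() result */

	if (txq_map >= nqsets)
		printf("ETHOFLD path, eosw_txq index %u\n", txq_map - nqsets);
	else
		printf("regular cxgb4_eth_xmit() path\n");
	return 0;
}
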
/**
- * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
- * @q: the SGE control Tx queue
+ * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
+ * @dev: netdevice
+ * @eotid: ETHOFLD tid to bind/unbind
+ * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
*
- * This is a variant of cxgb4_reclaim_completed_tx() that is used
- * for Tx queues that send only immediate data (presently just
- * the control queues) and thus do not have any sk_buffs to release.
+ * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
+ * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
+ * a traffic class.
*/
-static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
{
- int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
- int reclaim = hw_cidx - q->cidx;
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+ enum sge_eosw_state next_state;
+ struct sge_eosw_txq *eosw_txq;
+ u32 len, len16, nparams = 6;
+ struct fw_flowc_wr *flowc;
+ struct eotid_entry *entry;
+ struct sge_ofld_rxq *rxq;
+ struct sk_buff *skb;
+ int ret = 0;
- if (reclaim < 0)
- reclaim += q->size;
+ len = sizeof(*flowc) + sizeof(struct fw_flowc_mnemval) * nparams;
+ len16 = DIV_ROUND_UP(len, 16);
- q->in_use -= reclaim;
- q->cidx = hw_cidx;
+ entry = cxgb4_lookup_eotid(&adap->tids, eotid);
+ if (!entry)
+ return -ENOMEM;
+
+ eosw_txq = (struct sge_eosw_txq *)entry->data;
+ if (!eosw_txq)
+ return -ENOMEM;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ spin_lock_bh(&eosw_txq->lock);
+ if (tc != FW_SCHED_CLS_NONE) {
+ if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
+ goto out_unlock;
+
+ next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
+ } else {
+ if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
+ goto out_unlock;
+
+ next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
+ }
+
+ flowc = __skb_put(skb, len);
+ memset(flowc, 0, len);
+
+ rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
+ flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
+ FW_WR_FLOWID_V(eosw_txq->hwtid));
+ flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+ FW_FLOWC_WR_NPARAMS_V(nparams) |
+ FW_WR_COMPL_V(1));
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
+ flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
+ flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
+ flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
+ flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
+ flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
+ flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
+ flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
+ flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
+ flowc->mnemval[4].val = cpu_to_be32(tc);
+ flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
+ flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
+ FW_FLOWC_MNEM_EOSTATE_CLOSING :
+ FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
+
+ eosw_txq->cred -= len16;
+ eosw_txq->ncompl++;
+ eosw_txq->last_compl = 0;
+
+ ret = eosw_txq_enqueue(eosw_txq, skb);
+ if (ret) {
+ dev_consume_skb_any(skb);
+ goto out_unlock;
+ }
+
+ eosw_txq->state = next_state;
+ eosw_txq->flowc_idx = eosw_txq->pidx;
+ eosw_txq_advance(eosw_txq, 1);
+ ethofld_xmit(dev, eosw_txq);
+
+out_unlock:
+ spin_unlock_bh(&eosw_txq->lock);
+ return ret;
}
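
A minimal caller sketch, assuming an eotid has already been allocated and associated with an eosw_txq by the TC-MQPRIO setup code; binding uses a firmware scheduling-class index and unbinding passes FW_SCHED_CLS_NONE. Kernel context assumed, error handling trimmed:

static int example_bind_eotid(struct net_device *dev, u32 eotid, u32 class_idx)
{
	int ret;

	ret = cxgb4_ethofld_send_flowc(dev, eotid, class_idx);	/* bind to class */
	if (ret)
		return ret;

	/* ... transmit on the queue bound to this traffic class ... */

	return cxgb4_ethofld_send_flowc(dev, eotid, FW_SCHED_CLS_NONE); /* unbind */
}
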
/**
@@ -3311,6 +3757,112 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
+void cxgb4_ethofld_restart(unsigned long data)
+{
+ struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
+ int pktcount;
+
+ spin_lock(&eosw_txq->lock);
+ pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
+ if (pktcount < 0)
+ pktcount += eosw_txq->ndesc;
+
+ if (pktcount) {
+ cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
+ eosw_txq, pktcount);
+ eosw_txq->inuse -= pktcount;
+ }
+
+ /* There may be some packets waiting for completions. So,
+ * attempt to send these packets now.
+ */
+ ethofld_xmit(eosw_txq->netdev, eosw_txq);
+ spin_unlock(&eosw_txq->lock);
+}
+
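
The completion handler below only schedules the per-queue resume tasklet; wiring the tasklet to this function happens where the eosw_txq is initialized (in the TC-MQPRIO code, outside this file). A sketch of the assumed wiring:

static void example_init_eosw_resume(struct sge_eosw_txq *eosw_txq)
{
	/* Assumed to run once at eosw_txq setup time (not part of this hunk) */
	tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
		     (unsigned long)eosw_txq);
}
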
+/* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
+ * @q: the response queue that received the packet
+ * @rsp: the response queue descriptor holding the CPL message
+ * @si: the gather list of packet fragments
+ *
+ * Process an ETHOFLD Tx completion. Increment the cidx here, but
+ * free up the descriptors in a tasklet later.
+ */
+int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
+ const struct pkt_gl *si)
+{
+ u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+ /* skip RSS header */
+ rsp++;
+
+ if (opcode == CPL_FW4_ACK) {
+ const struct cpl_fw4_ack *cpl;
+ struct sge_eosw_txq *eosw_txq;
+ struct eotid_entry *entry;
+ struct sk_buff *skb;
+ u32 hdr_len, eotid;
+ u8 flits, wrlen16;
+ int credits;
+
+ cpl = (const struct cpl_fw4_ack *)rsp;
+ eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
+ q->adap->tids.eotid_base;
+ entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
+ if (!entry)
+ goto out_done;
+
+ eosw_txq = (struct sge_eosw_txq *)entry->data;
+ if (!eosw_txq)
+ goto out_done;
+
+ spin_lock(&eosw_txq->lock);
+ credits = cpl->credits;
+ while (credits > 0) {
+ skb = eosw_txq->desc[eosw_txq->cidx].skb;
+ if (!skb)
+ break;
+
+ if (unlikely((eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
+ eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
+ eosw_txq->cidx == eosw_txq->flowc_idx)) {
+ flits = DIV_ROUND_UP(skb->len, 8);
+ if (eosw_txq->state ==
+ CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
+ eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
+ else
+ eosw_txq->state = CXGB4_EO_STATE_CLOSED;
+ complete(&eosw_txq->completion);
+ } else {
+ hdr_len = eth_get_headlen(eosw_txq->netdev,
+ skb->data,
+ skb_headlen(skb));
+ flits = ethofld_calc_tx_flits(q->adap, skb,
+ hdr_len);
+ }
+ eosw_txq_advance_index(&eosw_txq->cidx, 1,
+ eosw_txq->ndesc);
+ wrlen16 = DIV_ROUND_UP(flits * 8, 16);
+ credits -= wrlen16;
+ }
+
+ eosw_txq->cred += cpl->credits;
+ eosw_txq->ncompl--;
+
+ spin_unlock(&eosw_txq->lock);
+
+ /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
+ * if there were packets waiting for completion.
+ */
+ tasklet_schedule(&eosw_txq->qresume_tsk);
+ }
+
+out_done:
+ return 0;
+}
+
/*
* The MSI-X interrupt handler for an SGE response queue.
*/
@@ -3791,15 +4343,11 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
* write the CIDX Updates into the Status Page at the end of the
* TX Queue.
*/
- c.autoequiqe_to_viid = htonl((dbqt
- ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
- : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+ c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
- ? HOSTFCMODE_INGRESS_QUEUE_X
- : HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
@@ -3839,7 +4387,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
txq->q.q_type = CXGB4_TXQ_ETH;
init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->txq = netdevq;
- txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+ txq->tso = 0;
+ txq->uso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
txq->mapping_err = 0;
txq->dbqt = dbqt;
@@ -3916,30 +4467,30 @@ int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
}
-int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
- struct net_device *dev, unsigned int iqid,
- unsigned int uld_type)
+static int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
+ struct net_device *dev, u32 cmd, u32 iqid)
{
unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
- int ret, nentries;
- struct fw_eq_ofld_cmd c;
- struct sge *s = &adap->sge;
struct port_info *pi = netdev_priv(dev);
- int cmd = FW_EQ_OFLD_CMD;
+ struct sge *s = &adap->sge;
+ struct fw_eq_ofld_cmd c;
+ u32 fb_min, nentries;
+ int ret;
/* Add status entries */
- nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
-
- txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
- sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
- &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
- NUMA_NO_NODE);
- if (!txq->q.desc)
+ nentries = q->size + s->stat_len / sizeof(struct tx_desc);
+ q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
+ sizeof(struct tx_sw_desc), &q->phys_addr,
+ &q->sdesc, s->stat_len, NUMA_NO_NODE);
+ if (!q->desc)
return -ENOMEM;
+ if (chip_ver <= CHELSIO_T5)
+ fb_min = FETCHBURSTMIN_64B_X;
+ else
+ fb_min = FETCHBURSTMIN_64B_T6_X;
+
memset(&c, 0, sizeof(c));
- if (unlikely(uld_type == CXGB4_TX_CRYPTO))
- cmd = FW_EQ_CTRL_CMD;
c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
@@ -3951,27 +4502,42 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
c.dcaen_to_eqsize =
- htonl(FW_EQ_OFLD_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
- ? FETCHBURSTMIN_64B_X
- : FETCHBURSTMIN_64B_T6_X) |
+ htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
- c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+ c.eqaddr = cpu_to_be64(q->phys_addr);
ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
- kfree(txq->q.sdesc);
- txq->q.sdesc = NULL;
+ kfree(q->sdesc);
+ q->sdesc = NULL;
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
- txq->q.desc, txq->q.phys_addr);
- txq->q.desc = NULL;
+ q->desc, q->phys_addr);
+ q->desc = NULL;
return ret;
}
+ init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
+ return 0;
+}
+
+int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
+ struct net_device *dev, unsigned int iqid,
+ unsigned int uld_type)
+{
+ u32 cmd = FW_EQ_OFLD_CMD;
+ int ret;
+
+ if (unlikely(uld_type == CXGB4_TX_CRYPTO))
+ cmd = FW_EQ_CTRL_CMD;
+
+ ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
+ if (ret)
+ return ret;
+
txq->q.q_type = CXGB4_TXQ_ULD;
- init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
@@ -3980,6 +4546,26 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
return 0;
}
+int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
+ struct net_device *dev, u32 iqid)
+{
+ int ret;
+
+ ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
+ if (ret)
+ return ret;
+
+ txq->q.q_type = CXGB4_TXQ_ULD;
+ spin_lock_init(&txq->lock);
+ txq->adap = adap;
+ txq->tso = 0;
+ txq->uso = 0;
+ txq->tx_cso = 0;
+ txq->vlan_ins = 0;
+ txq->mapping_err = 0;
+ return 0;
+}
+
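
Allocation pairs with t4_sge_free_ethofld_txq() further down. A hedged usage sketch; the ring size, response-queue id and calling context are assumptions, not taken from this hunk:

static int example_setup_eohw_txq(struct adapter *adap, struct net_device *dev,
				  struct sge_eohw_txq *txq, u32 iqid)
{
	int ret;

	txq->q.size = 1024;	/* assumed ring size, set before allocation */
	ret = t4_sge_alloc_ethofld_txq(adap, txq, dev, iqid);
	if (ret)
		return ret;

	/* teardown later: t4_sge_free_ethofld_txq(adap, txq); */
	return 0;
}
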
void free_txq(struct adapter *adap, struct sge_txq *q)
{
struct sge *s = &adap->sge;
@@ -4035,6 +4621,17 @@ void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
q->fl.size ? &q->fl : NULL);
}
+void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
+{
+ if (txq->q.desc) {
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
+ txq->q.cntxt_id);
+ free_tx_desc(adap, &txq->q, txq->q.in_use, false);
+ kfree(txq->q.sdesc);
+ free_txq(adap, &txq->q);
+ }
+}
+
/**
* t4_free_sge_resources - free SGE resources
* @adap: the adapter
@@ -4064,6 +4661,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (eq->rspq.desc)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
+ if (eq->msix) {
+ cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
+ eq->msix = NULL;
+ }
etq = &adap->sge.ethtxq[i];
if (etq->q.desc) {
@@ -4090,8 +4691,15 @@ void t4_free_sge_resources(struct adapter *adap)
}
}
- if (adap->sge.fw_evtq.desc)
+ if (adap->sge.fw_evtq.desc) {
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+ if (adap->sge.fwevtq_msix_idx >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap,
+ adap->sge.fwevtq_msix_idx);
+ }
+
+ if (adap->sge.nd_msix_idx >= 0)
+ cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);
if (adap->sge.intrq.desc)
free_rspq_fl(adap, &adap->sge.intrq, NULL);