Diffstat (limited to 'drivers/net/ethernet/amd/xgbe/xgbe-dev.c')
 drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 446 ++++++++++++++-----
 1 file changed, 348 insertions(+), 98 deletions(-)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 9da3a03e8c07..53f5f66ec2ee 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -335,6 +335,161 @@ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
}
}
+static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+}
+
+static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+ int ret = 0;
+
+ mutex_lock(&pdata->rss_mutex);
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ goto unlock;
+
+ usleep_range(1000, 1500);
+ }
+
+ ret = -EBUSY;
+
+unlock:
+ mutex_unlock(&pdata->rss_mutex);
+
+ return ret;
+}
+
+static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
+{
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key = (unsigned int *)&pdata->rss_key;
+ int ret;
+
+ while (key_regs--) {
+ ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = xgbe_write_rss_reg(pdata,
+ XGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
+{
+ memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
+
+ return xgbe_write_rss_hash_key(pdata);
+}
+
+static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
+ const u32 *table)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
+ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
+
+ return xgbe_write_rss_lookup_table(pdata);
+}
+
+static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ /* Program the hash key */
+ ret = xgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = xgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
+{
+ if (!pdata->hw_feat.rss)
+ return -EOPNOTSUPP;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+
+ return 0;
+}
+
+static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+{
+ int ret;
+
+ if (!pdata->hw_feat.rss)
+ return;
+
+ if (pdata->netdev->features & NETIF_F_RXHASH)
+ ret = xgbe_enable_rss(pdata);
+ else
+ ret = xgbe_disable_rss(pdata);
+
+ if (ret)
+ netdev_err(pdata->netdev,
+ "error configuring RSS, RSS disabled\n");
+}
+
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
unsigned int max_q_count, q_count;
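
[Editor's note] The RSS helpers added above follow a simple ownership handshake: write the value into MAC_RSSDR, set the index and address type in MAC_RSSAR, raise the OB bit, then poll until the hardware clears it. Below is a minimal sketch of how an init path might seed the key and indirection table before xgbe_enable_rss() runs. The register macros and pdata fields are taken from this file; the seeding choices (random key via prandom_bytes(), round-robin spreading via ethtool_rxfh_indir_default()) are illustrative assumptions, not necessarily the driver's actual defaults.

#include <linux/ethtool.h>
#include <linux/random.h>

/* Hypothetical init-time seeding of the RSS state consumed by
 * xgbe_write_rss_hash_key() and xgbe_write_rss_lookup_table() above.
 */
static void xgbe_seed_rss_defaults(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Random hash key (illustrative; a fixed key would also work) */
	prandom_bytes(pdata->rss_key, sizeof(pdata->rss_key));

	/* Spread flows round-robin across the available channels */
	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       ethtool_rxfh_indir_default(i, pdata->channel_count));
}
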
@@ -465,17 +620,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
if (channel->tx_ring) {
/* Enable the following Tx interrupts
- * TIE - Transmit Interrupt Enable (unless polling)
+ * TIE - Transmit Interrupt Enable (unless using
+ * per channel interrupts)
*/
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
}
if (channel->rx_ring) {
/* Enable following Rx interrupts
* RBUE - Receive Buffer Unavailable Enable
- * RIE - Receive Interrupt Enable
+ * RIE - Receive Interrupt Enable (unless using
+ * per channel interrupts)
*/
XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ if (!pdata->per_channel_irq)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
}
XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
@@ -880,13 +1039,15 @@ static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
rdesc->desc1 = 0;
rdesc->desc2 = 0;
rdesc->desc3 = 0;
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
int i;
int start_index = ring->cur;
@@ -895,26 +1056,11 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- rdesc = rdata->rdesc;
- /* Initialize Tx descriptor
- * Set buffer 1 (lo) address to zero
- * Set buffer 1 (hi) address to zero
- * Reset all other control bits (IC, TTSE, B2L & B1L)
- * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC,
- * etc)
- */
- rdesc->desc0 = 0;
- rdesc->desc1 = 0;
- rdesc->desc2 = 0;
- rdesc->desc3 = 0;
+ /* Initialize Tx descriptor */
+ xgbe_tx_desc_reset(rdata);
}
- /* Make sure everything is written to the descriptor(s) before
- * telling the device about them
- */
- wmb();
-
/* Update the total number of Tx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
@@ -933,19 +1079,19 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
struct xgbe_ring_desc *rdesc = rdata->rdesc;
/* Reset the Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control bits
- * OWN and INTE
+ * Set buffer 1 (lo) address to header dma address (lo)
+ * Set buffer 1 (hi) address to header dma address (hi)
+ * Set buffer 2 (lo) address to buffer dma address (lo)
+ * Set buffer 2 (hi) address to buffer dma address (hi) and
+ * set control bits OWN and INTE
*/
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
- rdesc->desc2 = 0;
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->rx.hdr.dma));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
- rdesc->desc3 = 0;
- if (rdata->interrupt)
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ rdata->interrupt ? 1 : 0);
/* Since the Rx DMA engine is likely running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
@@ -964,7 +1110,6 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
unsigned int start_index = ring->cur;
unsigned int rx_coalesce, rx_frames;
unsigned int i;
@@ -977,34 +1122,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
/* Initialize all descriptors */
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
- rdesc = rdata->rdesc;
- /* Initialize Rx descriptor
- * Set buffer 1 (lo) address to dma address (lo)
- * Set buffer 1 (hi) address to dma address (hi)
- * Set buffer 2 (lo) address to zero
- * Set buffer 2 (hi) address to zero and set control
- * bits OWN and INTE appropriateley
- */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
- rdesc->desc2 = 0;
- rdesc->desc3 = 0;
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
- rdata->interrupt = 1;
- if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
- /* Clear interrupt on completion bit */
- XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
- 0);
+ /* Set interrupt on completion bit as appropriate */
+ if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
rdata->interrupt = 0;
- }
- }
+ else
+ rdata->interrupt = 1;
- /* Make sure everything is written to the descriptors before
- * telling the device about them
- */
- wmb();
+ /* Initialize Rx descriptor */
+ xgbe_rx_desc_reset(rdata);
+ }
/* Update the total number of Rx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
@@ -1198,7 +1325,30 @@ static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
xgbe_config_flow_control(pdata);
}
-static void xgbe_pre_xmit(struct xgbe_channel *channel)
+static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring_data *rdata;
+
+ /* Issue a poll command to Tx DMA by writing address
+ * of next immediate free descriptor */
+ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx coalescing timer */
+ if (pdata->tx_usecs && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ hrtimer_start(&channel->tx_timer,
+ ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ }
+
+ ring->tx.xmit_more = 0;
+}
+
+static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->tx_ring;
@@ -1207,11 +1357,11 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
struct xgbe_packet_data *packet = &ring->packet_data;
unsigned int csum, tso, vlan;
unsigned int tso_context, vlan_context;
- unsigned int tx_coalesce, tx_frames;
+ unsigned int tx_set_ic;
int start_index = ring->cur;
int i;
- DBGPR("-->xgbe_pre_xmit\n");
+ DBGPR("-->xgbe_dev_xmit\n");
csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
CSUM_ENABLE);
@@ -1230,10 +1380,26 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
else
vlan_context = 0;
- tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
- tx_frames = pdata->tx_frames;
- if (tx_coalesce && !channel->tx_timer_active)
- ring->coalesce_count = 0;
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+ * - Tx frame count exceeds the frame count setting
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set exceeds the frame count setting
+ * No interrupt:
+ * - No frame count setting specified (ethtool -C ethX tx-frames 0)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+ ring->coalesce_count += packet->tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+ else if (packet->tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+ else if ((ring->coalesce_count % pdata->tx_frames) <
+ packet->tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
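
[Editor's note] The tx_set_ic decision a few lines above requests a completion interrupt only when a tx-frames boundary is crossed (or when a single submission alone exceeds the setting), since coalesce_count has already been advanced by this packet's frame count. A small stand-alone illustration of the same arithmetic, outside the driver, with an assumed tx-frames setting of 16:

#include <stdio.h>

/* Mirror of the IC decision: tx_frames is the ethtool tx-frames
 * setting, coalesce_count has already been advanced by tx_packets.
 */
static int tx_set_ic(unsigned int tx_frames, unsigned int coalesce_count,
		     unsigned int tx_packets)
{
	if (!tx_frames)
		return 0;	/* no frame count setting: never request IC here */
	if (tx_packets > tx_frames)
		return 1;	/* this submission alone exceeds the setting */
	if ((coalesce_count % tx_frames) < tx_packets)
		return 1;	/* a multiple of tx_frames was just crossed */
	return 0;
}

int main(void)
{
	unsigned int count = 0, i;

	/* One packet per call with tx-frames 16: IC set on every 16th */
	for (i = 1; i <= 32; i++) {
		count += 1;
		printf("packet %2u: IC=%d\n", i, tx_set_ic(16, count, 1));
	}
	return 0;
}
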
@@ -1300,13 +1466,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
- /* Set IC bit based on Tx coalescing settings */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
- if (tx_coalesce && (!tx_frames ||
- (++ring->coalesce_count % tx_frames)))
- /* Clear IC bit */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
-
/* Mark it as First Descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
@@ -1351,13 +1510,6 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
rdata->skb_dma_len);
- /* Set IC bit based on Tx coalescing settings */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
- if (tx_coalesce && (!tx_frames ||
- (++ring->coalesce_count % tx_frames)))
- /* Clear IC bit */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
-
/* Set OWN bit */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
@@ -1373,6 +1525,14 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
/* Set LAST bit for the last descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+ /* Set IC bit based on Tx coalescing settings */
+ if (tx_set_ic)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+
+ /* Save the Tx info to report back during cleanup */
+ rdata->tx.packets = packet->tx_packets;
+ rdata->tx.bytes = packet->tx_bytes;
+
/* In case the Tx DMA engine is running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
* for the first descriptor
@@ -1391,26 +1551,19 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
/* Make sure ownership is written to the descriptor */
wmb();
- /* Issue a poll command to Tx DMA by writing address
- * of next immediate free descriptor */
ring->cur++;
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
- XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
-
- /* Start the Tx coalescing timer */
- if (tx_coalesce && !channel->tx_timer_active) {
- channel->tx_timer_active = 1;
- hrtimer_start(&channel->tx_timer,
- ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
- HRTIMER_MODE_REL);
- }
+ if (!packet->skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
+ channel->queue_index)))
+ xgbe_tx_start_xmit(channel, ring);
+ else
+ ring->tx.xmit_more = 1;
DBGPR(" %s: descriptors %u to %u written\n",
channel->name, start_index & (ring->rdesc_count - 1),
(ring->cur - 1) & (ring->rdesc_count - 1));
- DBGPR("<--xgbe_pre_xmit\n");
+ DBGPR("<--xgbe_dev_xmit\n");
}
static int xgbe_dev_read(struct xgbe_channel *channel)
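
[Editor's note] Splitting xgbe_tx_start_xmit() out of xgbe_dev_xmit() lets the tail-pointer write (the Tx doorbell) be skipped while skb->xmit_more promises more packets, and issued once per burst instead of once per packet. A hedged sketch of how a caller could flush a ring that was left with tx.xmit_more set; the tx_start_xmit hook and the ring flag come from this patch, but the wrapper itself is illustrative (the real flush point lives outside this file):

/* Illustrative caller-side flush of a deferred Tx doorbell. */
static void xgbe_flush_deferred_xmit(struct xgbe_prv_data *pdata,
				     struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;

	/* If the last xgbe_dev_xmit() skipped the doorbell because
	 * xmit_more was set, write the tail pointer now so the DMA
	 * engine starts on the queued descriptors.
	 */
	if (ring && ring->tx.xmit_more)
		pdata->hw_if.tx_start_xmit(channel, ring);
}
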
@@ -1420,7 +1573,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
struct net_device *netdev = channel->pdata->netdev;
- unsigned int err, etlt;
+ unsigned int err, etlt, l34t;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
@@ -1431,6 +1584,9 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
return 1;
+ /* Make sure descriptor fields are read after reading the OWN bit */
+ rmb();
+
#ifdef XGMAC_ENABLE_RX_DESC_DUMP
xgbe_dump_rx_desc(ring, rdesc, ring->cur);
#endif
@@ -1454,8 +1610,33 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT, 1);
+ /* Get the header length */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD))
+ rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
+ RX_NORMAL_DESC2, HL);
+
+ /* Get the RSS hash */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ RSS_HASH, 1);
+
+ packet->rss_hash = le32_to_cpu(rdesc->desc1);
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+ case RX_DESC3_L34T_IPV4_TCP:
+ case RX_DESC3_L34T_IPV4_UDP:
+ case RX_DESC3_L34T_IPV6_TCP:
+ case RX_DESC3_L34T_IPV6_UDP:
+ packet->rss_hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ packet->rss_hash_type = PKT_HASH_TYPE_L3;
+ }
+ }
+
/* Get the packet length */
- rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
/* Not all the data has been transferred for this packet */
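
[Editor's note] The RSS hash and hash type recovered from desc1 and L34T above are only recorded in the packet_data here; the natural consumer is skb_set_hash() when the skb is built on the Rx path. A short hedged sketch of that hand-off (skb_set_hash() is standard kernel API; the wrapper is illustrative and not taken from this driver):

#include <linux/skbuff.h>

/* Illustrative hand-off of the RSS metadata captured in xgbe_dev_read(). */
static void xgbe_set_skb_rss_hash(struct xgbe_packet_data *packet,
				  struct sk_buff *skb)
{
	if (XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
			   RSS_HASH))
		skb_set_hash(skb, packet->rss_hash, packet->rss_hash_type);
}
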
@@ -1478,7 +1659,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
DBGPR(" err=%u, etlt=%#x\n", err, etlt);
- if (!err || (err && !etlt)) {
+ if (!err || !etlt) {
+ /* No error if err is 0 or etlt is 0 */
if ((etlt == 0x09) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
@@ -2298,6 +2480,47 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
+static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ struct xgbe_channel *channel)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* Calculate the status register to read and the position within */
+ if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
+ DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
+ while (time_before(jiffies, tx_timeout)) {
+ tx_status = XGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ usleep_range(500, 1000);
+ }
+
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ channel->queue_index);
+}
+
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
@@ -2326,6 +2549,15 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2417,6 +2649,15 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
struct xgbe_channel *channel;
unsigned int i;
+ /* Prepare for Tx DMA channel stop */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ xgbe_prepare_tx_stop(pdata, channel);
+ }
+
/* Disable MAC Tx */
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
@@ -2485,6 +2726,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
xgbe_config_tx_coalesce(pdata);
xgbe_config_rx_buffer_size(pdata);
xgbe_config_tso_mode(pdata);
+ xgbe_config_sph_mode(pdata);
+ xgbe_config_rss(pdata);
desc_if->wrapper_tx_desc_init(pdata);
desc_if->wrapper_rx_desc_init(pdata);
xgbe_enable_dma_interrupts(pdata);
@@ -2561,7 +2804,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->powerup_rx = xgbe_powerup_rx;
hw_if->powerdown_rx = xgbe_powerdown_rx;
- hw_if->pre_xmit = xgbe_pre_xmit;
+ hw_if->dev_xmit = xgbe_dev_xmit;
hw_if->dev_read = xgbe_dev_read;
hw_if->enable_int = xgbe_enable_int;
hw_if->disable_int = xgbe_disable_int;
@@ -2575,6 +2818,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->rx_desc_reset = xgbe_rx_desc_reset;
hw_if->is_last_desc = xgbe_is_last_desc;
hw_if->is_context_desc = xgbe_is_context_desc;
+ hw_if->tx_start_xmit = xgbe_tx_start_xmit;
/* For FLOW ctrl */
hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
@@ -2620,5 +2864,11 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->config_dcb_tc = xgbe_config_dcb_tc;
hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+ /* For Receive Side Scaling */
+ hw_if->enable_rss = xgbe_enable_rss;
+ hw_if->disable_rss = xgbe_disable_rss;
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
DBGPR("<--xgbe_init_function_ptrs\n");
}
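
[Editor's note] The four RSS hooks registered above give higher layers a single place to push key and indirection-table updates at runtime (for example from an ethtool rxfh handler). A hedged sketch of such a caller; the hook names and signatures match the function-pointer table in this file, everything else is illustrative:

/* Illustrative runtime RSS reconfiguration through the hw_if hooks. */
static int xgbe_apply_rss_config(struct xgbe_prv_data *pdata,
				 const u32 *indir, const u8 *key)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	int ret;

	if (key) {
		ret = hw_if->set_rss_hash_key(pdata, key);
		if (ret)
			return ret;
	}

	if (indir) {
		ret = hw_if->set_rss_lookup_table(pdata, indir);
		if (ret)
			return ret;
	}

	return 0;
}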