-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h          |  65
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c  | 140
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c       | 558
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c     | 322
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_param.c    |  29
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c        |   7
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c      |   2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h          |   2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c         |  52
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h          |  17
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c  | 257
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c      |  21
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c     | 125
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c      |  36
14 files changed, 973 insertions, 660 deletions
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 2b6cd02bfba0..26d9cd59ec75 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -81,68 +81,69 @@ struct e1000_adapter;
#include "e1000_hw.h"
-#define E1000_MAX_INTR 10
+#define E1000_MAX_INTR 10
/* TX/RX descriptor defines */
-#define E1000_DEFAULT_TXD 256
-#define E1000_MAX_TXD 256
-#define E1000_MIN_TXD 48
-#define E1000_MAX_82544_TXD 4096
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 48
+#define E1000_MAX_82544_TXD 4096
-#define E1000_DEFAULT_RXD 256
-#define E1000_MAX_RXD 256
-#define E1000_MIN_RXD 48
-#define E1000_MAX_82544_RXD 4096
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 48
+#define E1000_MAX_82544_RXD 4096
#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
/* this is the size past which hardware will drop packets when setting LPE=0 */
-#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define E1000_RXBUFFER_128 128 /* Used for packet split */
-#define E1000_RXBUFFER_256 256 /* Used for packet split */
-#define E1000_RXBUFFER_512 512
-#define E1000_RXBUFFER_1024 1024
-#define E1000_RXBUFFER_2048 2048
-#define E1000_RXBUFFER_4096 4096
-#define E1000_RXBUFFER_8192 8192
-#define E1000_RXBUFFER_16384 16384
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
/* SmartSpeed delimiters */
-#define E1000_SMARTSPEED_DOWNSHIFT 3
-#define E1000_SMARTSPEED_MAX 15
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
/* Packet Buffer allocations */
-#define E1000_PBA_BYTES_SHIFT 0xA
-#define E1000_TX_HEAD_ADDR_SHIFT 7
-#define E1000_PBA_TX_MASK 0xFFFF0000
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
/* Flow Control Watermarks */
-#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
-#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
-#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define E1000_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define AUTO_ALL_MODES 0
-#define E1000_EEPROM_82544_APM 0x0004
-#define E1000_EEPROM_APME 0x0400
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_82544_APM 0x0004
+#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
-#define E1000_MNG_VLAN_NONE (-1)
+#define E1000_MNG_VLAN_NONE (-1)
/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer */
+ * so a DMA handle can be stored along with the buffer
+ */
struct e1000_buffer {
struct sk_buff *skb;
dma_addr_t dma;
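
The hunk above ends at the wrapper struct its reworded comment describes: each ring slot pairs an skb with the DMA handle obtained when the buffer was mapped, so the completion path can unmap without re-deriving the address. A minimal sketch of that pattern, assuming the standard kernel DMA-mapping API; buf_slot, rx_map and rx_unmap are illustrative names, not driver code:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct buf_slot {			/* same shape as e1000_buffer */
	struct sk_buff *skb;
	dma_addr_t dma;
};

static int rx_map(struct device *dev, struct buf_slot *slot,
		  struct sk_buff *skb, unsigned int len)
{
	slot->dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, slot->dma))
		return -ENOMEM;
	slot->skb = skb;	/* the handle travels with the buffer */
	return 0;
}

static void rx_unmap(struct device *dev, struct buf_slot *slot,
		     unsigned int len)
{
	/* the stored handle is what lets completion unmap the buffer */
	dma_unmap_single(dev, slot->dma, len, DMA_FROM_DEVICE);
	slot->skb = NULL;
}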
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 14e30515f6aa..43462d596a4e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -115,12 +115,12 @@ static int e1000_get_settings(struct net_device *netdev,
if (hw->media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Full|
- SUPPORTED_Autoneg |
- SUPPORTED_TP);
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full|
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
ecmd->advertising = ADVERTISED_TP;
if (hw->autoneg == 1) {
@@ -161,8 +161,8 @@ static int e1000_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, adapter->link_speed);
/* unfortunately FULL_DUPLEX != DUPLEX_FULL
- * and HALF_DUPLEX != DUPLEX_HALF */
-
+ * and HALF_DUPLEX != DUPLEX_HALF
+ */
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
@@ -179,8 +179,7 @@ static int e1000_get_settings(struct net_device *netdev,
if ((hw->media_type == e1000_media_type_copper) &&
netif_carrier_ok(netdev))
ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
- ETH_TP_MDI_X :
- ETH_TP_MDI);
+ ETH_TP_MDI_X : ETH_TP_MDI);
else
ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
@@ -197,8 +196,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -224,8 +222,8 @@ static int e1000_set_settings(struct net_device *netdev,
ADVERTISED_Autoneg;
else
hw->autoneg_advertised = ecmd->advertising |
- ADVERTISED_TP |
- ADVERTISED_Autoneg;
+ ADVERTISED_TP |
+ ADVERTISED_Autoneg;
ecmd->advertising = hw->autoneg_advertised;
} else {
u32 speed = ethtool_cmd_speed(ecmd);
@@ -260,8 +258,7 @@ static u32 e1000_get_link(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- /*
- * If the link is not reported up to netdev, interrupts are disabled,
+ /* If the link is not reported up to netdev, interrupts are disabled,
* and so the physical link state may have changed since we last
* looked. Set get_link_status to make sure that the true link
* state is interrogated, rather than pulling a cached and possibly
@@ -484,7 +481,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
le16_to_cpus(&eeprom_buff[i]);
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
- eeprom->len);
+ eeprom->len);
kfree(eeprom_buff);
return ret_val;
@@ -517,15 +514,17 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
+ /* need read/modify/write of first changed EEPROM word
+ * only the second byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, first_word, 1,
&eeprom_buff[0]);
ptr++;
}
if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
+ /* need read/modify/write of last changed EEPROM word
+ * only the first byte of the word is being modified
+ */
ret_val = e1000_read_eeprom(hw, last_word, 1,
&eeprom_buff[last_word - first_word]);
}
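
The two read/modify/write branches above exist because the EEPROM is word-addressed while the ethtool request is byte-granular. A small sketch of the alignment math behind them, in plain standalone C; the offset and length values are illustrative:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t offset = 3, len = 6;		/* byte-granular request */
	uint32_t first_word = offset >> 1;	/* EEPROM is word-addressed */
	uint32_t last_word = (offset + len - 1) >> 1;

	/* odd start: byte 0 of first_word lies before the request and
	 * must be preserved, so read that word back before writing
	 */
	if (offset & 1)
		printf("read/modify/write word %u\n", first_word);
	/* odd end: byte 1 of last_word lies past the request and must
	 * be preserved likewise
	 */
	if ((offset + len) & 1)
		printf("read/modify/write word %u\n", last_word);
	return 0;
}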
@@ -606,11 +605,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
rx_old = adapter->rx_ring;
err = -ENOMEM;
- txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), GFP_KERNEL);
+ txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
+ GFP_KERNEL);
if (!txdr)
goto err_alloc_tx;
- rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), GFP_KERNEL);
+ rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
+ GFP_KERNEL);
if (!rxdr)
goto err_alloc_rx;
@@ -619,12 +620,12 @@ static int e1000_set_ringparam(struct net_device *netdev,
rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_RXD : E1000_MAX_82544_RXD));
+ E1000_MAX_RXD : E1000_MAX_82544_RXD));
rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
- E1000_MAX_TXD : E1000_MAX_82544_TXD));
+ E1000_MAX_TXD : E1000_MAX_82544_TXD));
txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -642,7 +643,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
goto err_setup_tx;
/* save the new, restore the old in order to free it,
- * then restore the new back again */
+ * then restore the new back again
+ */
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
@@ -784,7 +786,6 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);
if (hw->mac_type >= e1000_82543) {
-
REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
@@ -795,14 +796,11 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF,
0xFFFFFFFF);
}
-
} else {
-
REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
-
}
value = E1000_MC_TBL_SIZE;
@@ -858,13 +856,14 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
- /* NOTE: we don't test MSI interrupts here, yet */
- /* Hook up test interrupt handler just for this test */
+ /* NOTE: we don't test MSI interrupts here, yet
+ * Hook up test interrupt handler just for this test
+ */
if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
- netdev))
+ netdev))
shared_int = false;
else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
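
The registration above probes whether the line is shared: IRQF_PROBE_SHARED asks for exclusive use but fails quietly if another handler already owns the line, and only then is a shared handler installed. A sketch of the same two-step pattern, assuming the kernel's request_irq(); hook_test_irq is a hypothetical helper, not driver code:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/types.h>

static int hook_test_irq(unsigned int irq, irq_handler_t handler,
			 const char *name, void *dev, bool *shared)
{
	*shared = true;
	if (!request_irq(irq, handler, IRQF_PROBE_SHARED, name, dev))
		*shared = false;	/* we own the line exclusively */
	else if (request_irq(irq, handler, IRQF_SHARED, name, dev))
		return -EBUSY;		/* could not hook it at all */
	return 0;			/* caller branches on *shared */
}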
@@ -1253,14 +1252,15 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
- E1000_CTRL_FD); /* Force Duplex to FULL */
+ E1000_CTRL_FD); /* Force Duplex to FULL */
if (hw->media_type == e1000_media_type_copper &&
hw->phy_type == e1000_phy_m88)
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
else {
/* Set the ILOS bit on the fiber Nic if half
- * duplex link is detected. */
+ * duplex link is detected.
+ */
stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1446,7 +1446,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
- 1024);
+ 1024);
if (!ret_val)
good_cnt++;
if (unlikely(++l == rxdr->count)) l = 0;
@@ -1493,7 +1493,8 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
hw->serdes_has_link = false;
/* On some blade server designs, link establishment
- * could take as long as 2-3 minutes */
+ * could take as long as 2-3 minutes
+ */
do {
e1000_check_for_link(hw);
if (hw->serdes_has_link)
@@ -1545,7 +1546,8 @@ static void e1000_diag_test(struct net_device *netdev,
e_info(hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
- * interfere with test result */
+ * interfere with test result
+ */
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1639,7 +1641,8 @@ static int e1000_wol_exclusion(struct e1000_adapter *adapter,
default:
/* dual port cards only support WoL on port A from now on
* unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
+ * so exclude FUNC_1 ports from having WoL enabled
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
!adapter->eeprom_wol) {
wol->supported = 0;
@@ -1663,7 +1666,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
+ * supported by this hardware
+ */
if (e1000_wol_exclusion(adapter, wol) ||
!device_can_wakeup(&adapter->pdev->dev))
return;
@@ -1839,7 +1843,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
-/* BUG_ON(i != E1000_STATS_LEN); */
+/* BUG_ON(i != E1000_STATS_LEN); */
}
static void e1000_get_strings(struct net_device *netdev, u32 stringset,
@@ -1859,37 +1863,37 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
-/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+ /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
}
static const struct ethtool_ops e1000_ethtool_ops = {
- .get_settings = e1000_get_settings,
- .set_settings = e1000_set_settings,
- .get_drvinfo = e1000_get_drvinfo,
- .get_regs_len = e1000_get_regs_len,
- .get_regs = e1000_get_regs,
- .get_wol = e1000_get_wol,
- .set_wol = e1000_set_wol,
- .get_msglevel = e1000_get_msglevel,
- .set_msglevel = e1000_set_msglevel,
- .nway_reset = e1000_nway_reset,
- .get_link = e1000_get_link,
- .get_eeprom_len = e1000_get_eeprom_len,
- .get_eeprom = e1000_get_eeprom,
- .set_eeprom = e1000_set_eeprom,
- .get_ringparam = e1000_get_ringparam,
- .set_ringparam = e1000_set_ringparam,
- .get_pauseparam = e1000_get_pauseparam,
- .set_pauseparam = e1000_set_pauseparam,
- .self_test = e1000_diag_test,
- .get_strings = e1000_get_strings,
- .set_phys_id = e1000_set_phys_id,
- .get_ethtool_stats = e1000_get_ethtool_stats,
- .get_sset_count = e1000_get_sset_count,
- .get_coalesce = e1000_get_coalesce,
- .set_coalesce = e1000_set_coalesce,
+ .get_settings = e1000_get_settings,
+ .set_settings = e1000_set_settings,
+ .get_drvinfo = e1000_get_drvinfo,
+ .get_regs_len = e1000_get_regs_len,
+ .get_regs = e1000_get_regs,
+ .get_wol = e1000_get_wol,
+ .set_wol = e1000_set_wol,
+ .get_msglevel = e1000_get_msglevel,
+ .set_msglevel = e1000_set_msglevel,
+ .nway_reset = e1000_nway_reset,
+ .get_link = e1000_get_link,
+ .get_eeprom_len = e1000_get_eeprom_len,
+ .get_eeprom = e1000_get_eeprom,
+ .set_eeprom = e1000_set_eeprom,
+ .get_ringparam = e1000_get_ringparam,
+ .set_ringparam = e1000_set_ringparam,
+ .get_pauseparam = e1000_get_pauseparam,
+ .set_pauseparam = e1000_set_pauseparam,
+ .self_test = e1000_diag_test,
+ .get_strings = e1000_get_strings,
+ .set_phys_id = e1000_set_phys_id,
+ .get_ethtool_stats = e1000_get_ethtool_stats,
+ .get_sset_count = e1000_get_sset_count,
+ .get_coalesce = e1000_get_coalesce,
+ .set_coalesce = e1000_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
};
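
For context, a table like the one above only takes effect once the driver points the netdev at it during probe; in kernels of this vintage that was typically done through the SET_ETHTOOL_OPS macro. A one-line sketch, not part of this patch:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* sketch: hooking the ops table up during probe */
static void wire_up_ethtool(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
}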
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8fedd2451538..2879b9631e15 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -164,8 +164,9 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
if (hw->phy_init_script) {
msleep(20);
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of this routine. */
+ /* Save off the current value of register 0x2F5B to be restored
+ * at the end of this routine.
+ */
ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
@@ -466,7 +467,8 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
case e1000_82541:
case e1000_82541_rev_2:
/* These controllers can't ack the 64-bit write when issuing the
- * reset, so use IO-mapping as a workaround to issue the reset */
+ * reset, so use IO-mapping as a workaround to issue the reset
+ */
E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
break;
case e1000_82545_rev_3:
@@ -480,9 +482,9 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
break;
}
- /* After MAC reset, force reload of EEPROM to restore power-on settings to
- * device. Later controllers reload the EEPROM automatically, so just wait
- * for reload to complete.
+ /* After MAC reset, force reload of EEPROM to restore power-on settings
+ * to device. Later controllers reload the EEPROM automatically, so
+ * just wait for reload to complete.
*/
switch (hw->mac_type) {
case e1000_82542_rev2_0:
@@ -591,8 +593,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
msleep(5);
}
- /* Setup the receive address. This involves initializing all of the Receive
- * Address Registers (RARs 0 - 15).
+ /* Setup the receive address. This involves initializing all of the
+ * Receive Address Registers (RARs 0 - 15).
*/
e1000_init_rx_addrs(hw);
@@ -611,7 +613,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
for (i = 0; i < mta_size; i++) {
E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* use write flush to prevent Memory Write Block (MWB) from
- * occurring when accessing our register space */
+ * occurring when accessing our register space
+ */
E1000_WRITE_FLUSH();
}
@@ -630,7 +633,9 @@ s32 e1000_init_hw(struct e1000_hw *hw)
case e1000_82546_rev_3:
break;
default:
- /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+ /* Workaround for PCI-X problem when BIOS sets MMRBC
+ * incorrectly.
+ */
if (hw->bus_type == e1000_bus_type_pcix
&& e1000_pcix_get_mmrbc(hw) > 2048)
e1000_pcix_set_mmrbc(hw, 2048);
@@ -660,7 +665,8 @@ s32 e1000_init_hw(struct e1000_hw *hw)
hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
ctrl_ext = er32(CTRL_EXT);
/* Relaxed ordering must be disabled to avoid a parity
- * error crash in a PCI slot. */
+ * error crash in a PCI slot.
+ */
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
}
@@ -810,8 +816,9 @@ s32 e1000_setup_link(struct e1000_hw *hw)
ew32(FCRTL, 0);
ew32(FCRTH, 0);
} else {
- /* We need to set up the Receive Threshold high and low water marks
- * as well as (optionally) enabling the transmission of XON frames.
+ /* We need to set up the Receive Threshold high and low water
+ * marks as well as (optionally) enabling the transmission of
+ * XON frames.
*/
if (hw->fc_send_xon) {
ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
@@ -868,42 +875,46 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e1000_config_collision_dist(hw);
/* Check for a software override of the flow control settings, and setup
- * the device accordingly. If auto-negotiation is enabled, then software
- * will have to set the "PAUSE" bits to the correct value in the Tranmsit
- * Config Word Register (TXCW) and re-start auto-negotiation. However, if
- * auto-negotiation is disabled, then software will have to manually
- * configure the two flow control enable bits in the CTRL register.
+ * the device accordingly. If auto-negotiation is enabled, then
+ * software will have to set the "PAUSE" bits to the correct value in
+ * the Transmit Config Word Register (TXCW) and re-start
+ * auto-negotiation. However, if auto-negotiation is disabled, then
+ * software will have to manually configure the two flow control enable
+ * bits in the CTRL register.
*
* The possible values of the "fc" parameter are:
- * 0: Flow control is completely disabled
- * 1: Rx flow control is enabled (we can receive pause frames, but
- * not send pause frames).
- * 2: Tx flow control is enabled (we can send pause frames but we do
- * not support receiving pause frames).
- * 3: Both Rx and TX flow control (symmetric) are enabled.
+ * 0: Flow control is completely disabled
+ * 1: Rx flow control is enabled (we can receive pause frames, but
+ * not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we do
+ * not support receiving pause frames).
+ * 3: Both Rx and TX flow control (symmetric) are enabled.
*/
switch (hw->fc) {
case E1000_FC_NONE:
- /* Flow control is completely disabled by a software over-ride. */
+ /* Flow ctrl is completely disabled by a software over-ride */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case E1000_FC_RX_PAUSE:
- /* RX Flow control is enabled and TX Flow control is disabled by a
- * software over-ride. Since there really isn't a way to advertise
- * that we are capable of RX Pause ONLY, we will advertise that we
- * support both symmetric and asymmetric RX PAUSE. Later, we will
- * disable the adapter's ability to send PAUSE frames.
+ /* Rx Flow control is enabled and Tx Flow control is disabled by
+ * a software over-ride. Since there really isn't a way to
+ * advertise that we are capable of Rx Pause ONLY, we will
+ * advertise that we support both symmetric and asymmetric Rx
+ * PAUSE. Later, we will disable the adapter's ability to send
+ * PAUSE frames.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case E1000_FC_TX_PAUSE:
- /* TX Flow control is enabled, and RX Flow control is disabled, by a
- * software over-ride.
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
+ * by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case E1000_FC_FULL:
- /* Flow control (both RX and TX) is enabled by a software over-ride. */
+ /* Flow control (both Rx and Tx) is enabled by a software
+ * over-ride.
+ */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
default:
@@ -912,11 +923,11 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
break;
}
- /* Since auto-negotiation is enabled, take the link out of reset (the link
- * will be in reset, because we previously reset the chip). This will
- * restart auto-negotiation. If auto-negotiation is successful then the
- * link-up status bit will be set and the flow control enable bits (RFCE
- * and TFCE) will be set according to their negotiated value.
+ /* Since auto-negotiation is enabled, take the link out of reset (the
+ * link will be in reset, because we previously reset the chip). This
+ * will restart auto-negotiation. If auto-negotiation is successful
+ * then the link-up status bit will be set and the flow control enable
+ * bits (RFCE and TFCE) will be set according to their negotiated value.
*/
e_dbg("Auto-negotiation enabled\n");
@@ -927,11 +938,12 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
hw->txcw = txcw;
msleep(1);
- /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
- * indication in the Device Status Register. Time-out if a link isn't
- * seen in 500 milliseconds seconds (Auto-negotiation should complete in
- * less than 500 milliseconds even if the other end is doing it in SW).
- * For internal serdes, we just assume a signal is present, then poll.
+ /* If we have a signal (the cable is plugged in) then poll for a
+ * "Link-Up" indication in the Device Status Register. Time-out if a
+ * link isn't seen in 500 milliseconds (Auto-negotiation should
+ * complete in less than 500 milliseconds even if the other end is doing
+ * it in SW). For internal serdes, we just assume a signal is present,
+ * then poll.
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
@@ -946,9 +958,9 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
e_dbg("Never got a valid link from auto-neg!!!\n");
hw->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
- * e1000_check_for_link. This routine will force the link up if
- * we detect a signal. This will allow us to communicate with
- * non-autonegotiating link partners.
+ * e1000_check_for_link. This routine will force the
+ * link up if we detect a signal. This will allow us to
+ * communicate with non-autonegotiating link partners.
*/
ret_val = e1000_check_for_link(hw);
if (ret_val) {
@@ -1042,9 +1054,9 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
e_dbg("e1000_copper_link_preconfig");
ctrl = er32(CTRL);
- /* With 82543, we need to force speed and duplex on the MAC equal to what
- * the PHY speed and duplex configuration is. In addition, we need to
- * perform a hardware reset on the PHY to take it out of reset.
+ /* With 82543, we need to force speed and duplex on the MAC equal to
+ * what the PHY speed and duplex configuration is. In addition, we need
+ * to perform a hardware reset on the PHY to take it out of reset.
*/
if (hw->mac_type > e1000_82543) {
ctrl |= E1000_CTRL_SLU;
@@ -1175,7 +1187,8 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
/* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
- * resolution as hardware default. */
+ * resolution as hardware default.
+ */
if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */
ret_val =
@@ -1485,13 +1498,15 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
if (hw->autoneg) {
/* Setup autoneg and flow control advertisement
- * and perform autonegotiation */
+ * and perform autonegotiation
+ */
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H,or 100F
- * depending on value from forced_speed_duplex. */
+ * depending on value from forced_speed_duplex.
+ */
e_dbg("Forcing speed and duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
@@ -1609,7 +1624,8 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
- * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+ * Advertisement Register (PHY_AUTONEG_ADV) and re-start
+ * auto-negotiation.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
@@ -1636,7 +1652,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* capable of RX Pause ONLY, we will advertise that we
* support both symmetric and asymmetric RX PAUSE. Later
* (in e1000_config_fc_after_link_up) we will disable the
- *hw's ability to send PAUSE frames.
+ * hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
@@ -1720,15 +1736,15 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
/* Are we forcing Full or Half Duplex? */
if (hw->forced_speed_duplex == e1000_100_full ||
hw->forced_speed_duplex == e1000_10_full) {
- /* We want to force full duplex so we SET the full duplex bits in the
- * Device and MII Control Registers.
+ /* We want to force full duplex so we SET the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl |= E1000_CTRL_FD;
mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
e_dbg("Full Duplex\n");
} else {
- /* We want to force half duplex so we CLEAR the full duplex bits in
- * the Device and MII Control Registers.
+ /* We want to force half duplex so we CLEAR the full duplex bits
+ * in the Device and MII Control Registers.
*/
ctrl &= ~E1000_CTRL_FD;
mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
@@ -1762,8 +1778,8 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed are duplex are forced.
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires
+ * MDI forced whenever speed and duplex are forced.
*/
phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
ret_val =
@@ -1814,10 +1830,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
e_dbg("Waiting for forced speed/duplex link.\n");
mii_status_reg = 0;
- /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+ /* Wait for autoneg to complete or 4.5 seconds to expire */
for (i = PHY_FORCE_TIME; i > 0; i--) {
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
@@ -1834,20 +1850,24 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
msleep(100);
}
if ((i == 0) && (hw->phy_type == e1000_phy_m88)) {
- /* We didn't get link. Reset the DSP and wait again for link. */
+ /* We didn't get link. Reset the DSP and wait again
+ * for link.
+ */
ret_val = e1000_phy_reset_dsp(hw);
if (ret_val) {
e_dbg("Error Resetting PHY DSP\n");
return ret_val;
}
}
- /* This loop will early-out if the link condition has been met. */
+ /* This loop will early-out if the link condition has been
+ * met
+ */
for (i = PHY_FORCE_TIME; i > 0; i--) {
if (mii_status_reg & MII_SR_LINK_STATUS)
break;
msleep(100);
- /* Read the MII Status Register and wait for Auto-Neg Complete bit
- * to be set.
+ /* Read the MII Status Register and wait for Auto-Neg
+ * Complete bit to be set.
*/
ret_val =
e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
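
Both loops in this hunk follow the same poll-until-set-or-timeout shape. A compact sketch of that shape in plain C; read_phy_status() and LINK_STATUS are hypothetical stand-ins for e1000_read_phy_reg(hw, PHY_STATUS, ...) and MII_SR_LINK_STATUS:

#include <stdbool.h>
#include <stdint.h>

extern uint16_t read_phy_status(void);	/* assumed register accessor */
extern void sleep_ms(unsigned int ms);

#define LINK_STATUS	(1u << 2)	/* illustrative bit position */

static bool wait_for_link(unsigned int tries)
{
	while (tries--) {
		if (read_phy_status() & LINK_STATUS)
			return true;	/* early-out once link is up */
		sleep_ms(100);		/* PHY_FORCE_TIME x 100 ms total */
	}
	return false;			/* timed out */
}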
@@ -1862,9 +1882,10 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
}
if (hw->phy_type == e1000_phy_m88) {
- /* Because we reset the PHY above, we need to re-force TX_CLK in the
- * Extended PHY Specific Control Register to 25MHz clock. This value
- * defaults back to a 2.5MHz clock when the PHY is reset.
+ /* Because we reset the PHY above, we need to re-force TX_CLK in
+ * the Extended PHY Specific Control Register to 25MHz clock.
+ * This value defaults back to a 2.5MHz clock when the PHY is
+ * reset.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
@@ -1879,8 +1900,9 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* In addition, because of the s/w reset above, we need to enable CRS on
- * TX. This must be set for both full and half duplex operation.
+ /* In addition, because of the s/w reset above, we need to
+ * enable CRS on Tx. This must be set for both full and half
+ * duplex operation.
*/
ret_val =
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1951,7 +1973,8 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
e_dbg("e1000_config_mac_to_phy");
/* 82544 or newer MAC, Auto Speed Detection takes care of
- * MAC speed/duplex configuration.*/
+ * MAC speed/duplex configuration.
+ */
if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
return E1000_SUCCESS;
@@ -1985,7 +2008,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
* registers depending on negotiated values.
*/
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
- &phy_data);
+ &phy_data);
if (ret_val)
return ret_val;
@@ -2002,7 +2025,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
ctrl |= E1000_CTRL_SPD_1000;
else if ((phy_data & M88E1000_PSSR_SPEED) ==
- M88E1000_PSSR_100MBS)
+ M88E1000_PSSR_100MBS)
ctrl |= E1000_CTRL_SPD_100;
}
@@ -2135,9 +2158,9 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
/* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement Register
- * (Address 4) and the Auto_Negotiation Base Page Ability
- * Register (Address 5) to determine how flow control was
- * negotiated.
+ * (Address 4) and the Auto_Negotiation Base Page
+ * Ability Register (Address 5) to determine how flow
+ * control was negotiated.
*/
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
&mii_nway_adv_reg);
@@ -2148,18 +2171,19 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /* Two bits in the Auto Negotiation Advertisement Register
- * (Address 4) and two bits in the Auto Negotiation Base
- * Page Ability Register (Address 5) determine flow control
- * for both the PHY and the link partner. The following
- * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
- * 1999, describes these PAUSE resolution bits and how flow
- * control is determined based upon these settings.
+ /* Two bits in the Auto Negotiation Advertisement
+ * Register (Address 4) and two bits in the Auto
+ * Negotiation Base Page Ability Register (Address 5)
+ * determine flow control for both the PHY and the link
+ * partner. The following table, taken out of the IEEE
+ * 802.3ab/D6.0 dated March 25, 1999, describes these
+ * PAUSE resolution bits and how flow control is
+ * determined based upon these settings.
* NOTE: DC = Don't Care
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 0 | DC | DC | E1000_FC_NONE
* 0 | 1 | 0 | DC | E1000_FC_NONE
* 0 | 1 | 1 | 0 | E1000_FC_NONE
@@ -2178,17 +2202,18 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | DC | 1 | DC | E1000_FC_FULL
*
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /* Now we need to check if the user selected RX ONLY
- * of pause frames. In this case, we had to advertise
- * FULL flow control because we could not advertise RX
- * ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ /* Now we need to check if the user selected Rx
+ * ONLY of pause frames. In this case, we had
+ * to advertise FULL flow control because we
+ * could not advertise Rx ONLY. Hence, we must
+ * now check to see if we need to turn OFF the
+ * TRANSMISSION of PAUSE frames.
*/
if (hw->original_fc == E1000_FC_FULL) {
hw->fc = E1000_FC_FULL;
@@ -2203,7 +2228,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE
*
*/
@@ -2220,7 +2245,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
- *-------|---------|-------|---------|--------------------
+ *-------|---------|-------|---------|------------------
* 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE
*
*/
@@ -2233,25 +2258,27 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
e_dbg
("Flow Control = RX PAUSE frames only.\n");
}
- /* Per the IEEE spec, at this point flow control should be
- * disabled. However, we want to consider that we could
- * be connected to a legacy switch that doesn't advertise
- * desired flow control, but can be forced on the link
- * partner. So if we advertised no flow control, that is
- * what we will resolve to. If we advertised some kind of
- * receive capability (Rx Pause Only or Full Flow Control)
- * and the link partner advertised none, we will configure
- * ourselves to enable Rx Flow Control only. We can do
- * this safely for two reasons: If the link partner really
- * didn't want flow control enabled, and we enable Rx, no
- * harm done since we won't be receiving any PAUSE frames
- * anyway. If the intent on the link partner was to have
- * flow control enabled, then by us enabling RX only, we
- * can at least receive pause frames and process them.
- * This is a good idea because in most cases, since we are
- * predominantly a server NIC, more times than not we will
- * be asked to delay transmission of packets than asking
- * our link partner to pause transmission of frames.
+ /* Per the IEEE spec, at this point flow control should
+ * be disabled. However, we want to consider that we
+ * could be connected to a legacy switch that doesn't
+ * advertise desired flow control, but can be forced on
+ * the link partner. So if we advertised no flow
+ * control, that is what we will resolve to. If we
+ * advertised some kind of receive capability (Rx Pause
+ * Only or Full Flow Control) and the link partner
+ * advertised none, we will configure ourselves to
+ * enable Rx Flow Control only. We can do this safely
+ * for two reasons: If the link partner really
+ * didn't want flow control enabled, and we enable Rx,
+ * no harm done since we won't be receiving any PAUSE
+ * frames anyway. If the intent on the link partner was
+ * to have flow control enabled, then by us enabling Rx
+ * only, we can at least receive pause frames and
+ * process them. This is a good idea because in most
+ * cases, since we are predominantly a server NIC, more
+ * often than not we will be asked to delay transmission
+ * of packets rather than ask our link partner to pause
+ * transmission of frames.
*/
else if ((hw->original_fc == E1000_FC_NONE ||
hw->original_fc == E1000_FC_TX_PAUSE) ||
@@ -2316,8 +2343,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
@@ -2346,8 +2372,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
goto out;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -2358,8 +2383,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
hw->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
- /*
- * If we force link for non-auto-negotiation switch, check
+ /* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
@@ -2468,15 +2492,17 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
if (phy_data & MII_SR_LINK_STATUS) {
hw->get_link_status = false;
- /* Check if there was DownShift, must be checked immediately after
- * link-up */
+ /* Check if there was DownShift, must be checked
+ * immediately after link-up
+ */
e1000_check_downshift(hw);
/* If we are on 82544 or 82543 silicon and speed/duplex
- * are forced to 10H or 10F, then we will implement the polarity
- * reversal workaround. We disable interrupts first, and upon
- * returning, place the devices interrupt state to its previous
- * value except for the link status change interrupt which will
+ * are forced to 10H or 10F, then we will implement the
+ * polarity reversal workaround. We disable interrupts
+ * first, and upon returning, place the devices
+ * interrupt state to its previous value except for the
+ * link status change interrupt which will
* happen due to the execution of this workaround.
*/
@@ -2527,9 +2553,10 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
}
- /* Configure Flow Control now that Auto-Neg has completed. First, we
- * need to restore the desired flow control settings because we may
- * have had to re-autoneg with a different link partner.
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control settings
+ * because we may have had to re-autoneg with a different link
+ * partner.
*/
ret_val = e1000_config_fc_after_link_up(hw);
if (ret_val) {
@@ -2538,11 +2565,12 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
}
/* At this point we know that we are on copper and we have
- * auto-negotiated link. These are conditions for checking the link
- * partner capability register. We use the link speed to determine if
- * TBI compatibility needs to be turned on or off. If the link is not
- * at gigabit speed, then TBI compatibility is not needed. If we are
- * at gigabit speed, we turn on TBI compatibility.
+ * auto-negotiated link. These are conditions for checking the
+ * link partner capability register. We use the link speed to
+ * determine if TBI compatibility needs to be turned on or off.
+ * If the link is not at gigabit speed, then TBI compatibility
+ * is not needed. If we are at gigabit speed, we turn on TBI
+ * compatibility.
*/
if (hw->tbi_compatibility_en) {
u16 speed, duplex;
@@ -2554,20 +2582,23 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
return ret_val;
}
if (speed != SPEED_1000) {
- /* If link speed is not set to gigabit speed, we do not need
- * to enable TBI compatibility.
+ /* If link speed is not set to gigabit speed, we
+ * do not need to enable TBI compatibility.
*/
if (hw->tbi_compatibility_on) {
- /* If we previously were in the mode, turn it off. */
+ /* If we previously were in the mode,
+ * turn it off.
+ */
rctl = er32(RCTL);
rctl &= ~E1000_RCTL_SBP;
ew32(RCTL, rctl);
hw->tbi_compatibility_on = false;
}
} else {
- /* If TBI compatibility is was previously off, turn it on. For
- * compatibility with a TBI link partner, we will store bad
- * packets. Some frames have an additional byte on the end and
+ /* If TBI compatibility was previously off,
+ * turn it on. For compatibility with a TBI link
+ * partner, we will store bad packets. Some
+ * frames have an additional byte on the end and
* will look like CRC errors to the hardware.
*/
if (!hw->tbi_compatibility_on) {
@@ -2629,9 +2660,9 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
*duplex = FULL_DUPLEX;
}
- /* IGP01 PHY may advertise full duplex operation after speed downgrade even
- * if it is operating at half duplex. Here we set the duplex settings to
- * match the duplex in the link partner's capabilities.
+ /* IGP01 PHY may advertise full duplex operation after speed downgrade
+ * even if it is operating at half duplex. Here we set the duplex
+ * settings to match the duplex in the link partner's capabilities.
*/
if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
@@ -2697,8 +2728,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
*/
static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Raise the clock input to the Management Data Clock (by setting the MDC
- * bit), and then delay 10 microseconds.
+ /* Raise the clock input to the Management Data Clock (by setting the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl | E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2712,8 +2743,8 @@ static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
*/
static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl)
{
- /* Lower the clock input to the Management Data Clock (by clearing the MDC
- * bit), and then delay 10 microseconds.
+ /* Lower the clock input to the Management Data Clock (by clearing the
+ * MDC bit), and then delay 10 microseconds.
*/
ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC));
E1000_WRITE_FLUSH();
@@ -2746,10 +2777,10 @@ static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count)
ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
while (mask) {
- /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
- * then raising and lowering the Management Data Clock. A "0" is
- * shifted out to the PHY by setting the MDIO bit to "0" and then
- * raising and lowering the clock.
+ /* A "1" is shifted out to the PHY by setting the MDIO bit to
+ * "1" and then raising and lowering the Management Data Clock.
+ * A "0" is shifted out to the PHY by setting the MDIO bit to
+ * "0" and then raising and lowering the clock.
*/
if (data & mask)
ctrl |= E1000_CTRL_MDIO;
@@ -2781,24 +2812,26 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
u8 i;
/* In order to read a register from the PHY, we need to shift in a total
- * of 18 bits from the PHY. The first two bit (turnaround) times are used
- * to avoid contention on the MDIO pin when a read operation is performed.
- * These two bits are ignored by us and thrown away. Bits are "shifted in"
- * by raising the input to the Management Data Clock (setting the MDC bit),
- * and then reading the value of the MDIO bit.
+ * of 18 bits from the PHY. The first two bit (turnaround) times are
+ * used to avoid contention on the MDIO pin when a read operation is
+ * performed. These two bits are ignored by us and thrown away. Bits are
+ * "shifted in" by raising the input to the Management Data Clock
+ * (setting the MDC bit), and then reading the value of the MDIO bit.
*/
ctrl = er32(CTRL);
- /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+ /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+ * input.
+ */
ctrl &= ~E1000_CTRL_MDIO_DIR;
ctrl &= ~E1000_CTRL_MDIO;
ew32(CTRL, ctrl);
E1000_WRITE_FLUSH();
- /* Raise and Lower the clock before reading in the data. This accounts for
- * the turnaround bits. The first clock occurred when we clocked out the
- * last bit of the Register Address.
+ /* Raise and Lower the clock before reading in the data. This accounts
+ * for the turnaround bits. The first clock occurred when we clocked out
+ * the last bit of the Register Address.
*/
e1000_raise_mdi_clk(hw, &ctrl);
e1000_lower_mdi_clk(hw, &ctrl);
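
The two clock helpers and the comment above describe one half of a bit-banged MDIO transaction: each bit is clocked in by raising MDC, sampling MDIO, then lowering MDC. A sketch of that read side; reg_rd()/reg_wr() and the bit masks are hypothetical stand-ins for er32()/ew32() and the E1000_CTRL_MDC/MDIO bits:

#include <stdint.h>

extern uint32_t reg_rd(void);		/* assumed register accessors */
extern void reg_wr(uint32_t v);
extern void delay_us(unsigned int us);

#define CTRL_MDC	(1u << 0)	/* management clock (illustrative) */
#define CTRL_MDIO	(1u << 1)	/* management data (illustrative) */

static uint16_t mdio_shift_in(unsigned int nbits)
{
	uint32_t ctrl = reg_rd();
	uint16_t data = 0;

	while (nbits--) {
		data <<= 1;
		reg_wr(ctrl | CTRL_MDC);	/* raise MDC ... */
		delay_us(10);
		if (reg_rd() & CTRL_MDIO)	/* ... sample MDIO */
			data |= 1;
		reg_wr(ctrl & ~CTRL_MDC);	/* lower MDC */
		delay_us(10);
	}
	return data;
}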
@@ -2870,8 +2903,8 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
if (hw->mac_type > e1000_82543) {
/* Set up Op-code, Phy Address, and register address in the MDI
- * Control register. The MAC will take care of interfacing with the
- * PHY to retrieve the desired data.
+ * Control register. The MAC will take care of interfacing with
+ * the PHY to retrieve the desired data.
*/
if (hw->mac_type == e1000_ce4100) {
mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
@@ -2929,31 +2962,32 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*phy_data = (u16) mdic;
}
} else {
- /* We must first send a preamble through the MDIO pin to signal the
- * beginning of an MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We must first send a preamble through the MDIO pin to signal
+ * the beginning of an MII instruction. This is done by sending
+ * 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
/* Now combine the next few fields that are required for a read
* operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine five different times. The format of
- * a MII read instruction consists of a shift out of 14 bits and is
- * defined as follows:
+ * e1000_shift_out_mdi_bits routine five different times. The
+ * format of a MII read instruction consists of a shift out of
+ * 14 bits and is defined as follows:
* <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
- * followed by a shift in of 18 bits. This first two bits shifted in
- * are TurnAround bits used to avoid contention on the MDIO pin when a
- * READ operation is performed. These two bits are thrown away
- * followed by a shift in of 16 bits which contains the desired data.
+ * followed by a shift in of 18 bits. The first two bits
+ * shifted in are TurnAround bits used to avoid contention on
+ * the MDIO pin when a READ operation is performed. These two
+ * bits are thrown away followed by a shift in of 16 bits which
+ * contains the desired data.
*/
mdic = ((reg_addr) | (phy_addr << 5) |
(PHY_OP_READ << 10) | (PHY_SOF << 12));
e1000_shift_out_mdi_bits(hw, mdic, 14);
- /* Now that we've shifted out the read command to the MII, we need to
- * "shift in" the 16-bit value (18 total bits) of the requested PHY
- * register address.
+ /* Now that we've shifted out the read command to the MII, we
+ * need to "shift in" the 16-bit value (18 total bits) of the
+ * requested PHY register address.
*/
*phy_data = e1000_shift_in_mdi_bits(hw);
}
@@ -3060,18 +3094,18 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
}
}
} else {
- /* We'll need to use the SW defined pins to shift the write command
- * out to the PHY. We first send a preamble to the PHY to signal the
- * beginning of the MII instruction. This is done by sending 32
- * consecutive "1" bits.
+ /* We'll need to use the SW defined pins to shift the write
+ * command out to the PHY. We first send a preamble to the PHY
+ * to signal the beginning of the MII instruction. This is done
+ * by sending 32 consecutive "1" bits.
*/
e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
- /* Now combine the remaining required fields that will indicate a
- * write operation. We use this method instead of calling the
- * e1000_shift_out_mdi_bits routine for each field in the command. The
- * format of a MII write instruction is as follows:
- * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+ /* Now combine the remaining required fields that will indicate
+ * a write operation. We use this method instead of calling the
+ * e1000_shift_out_mdi_bits routine for each field in the
+ * command. The format of a MII write instruction is as follows:
+ * <Preamble><SOF><OpCode><PhyAddr><RegAddr><Turnaround><Data>.
*/
mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
(PHY_OP_WRITE << 12) | (PHY_SOF << 14));
@@ -3100,10 +3134,10 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
e_dbg("Resetting Phy...\n");
if (hw->mac_type > e1000_82543) {
- /* Read the device control register and assert the E1000_CTRL_PHY_RST
- * bit. Then, take it out of reset.
+ /* Read the device control register and assert the
+ * E1000_CTRL_PHY_RST bit. Then, take it out of reset.
* For e1000 hardware, we delay for 10ms between the assert
- * and deassert.
+ * and de-assert.
*/
ctrl = er32(CTRL);
ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
@@ -3115,8 +3149,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
E1000_WRITE_FLUSH();
} else {
- /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
- * bit to put the PHY into reset. Then, take it out of reset.
+ /* Read the Extended Device Control Register, assert the
+ * PHY_RESET_DIR bit to put the PHY into reset. Then, take it
+ * out of reset.
*/
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
@@ -3301,7 +3336,8 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_igp_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
/* IGP01E1000 does not need to support it. */
@@ -3327,7 +3363,9 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
- /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+ /* Local/Remote Receiver Information is only valid at 1000
+ * Mbps
+ */
ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
if (ret_val)
return ret_val;
@@ -3379,7 +3417,8 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
e_dbg("e1000_phy_m88_get_info");
/* The downshift status is checked only once, after link is established,
- * and it stored in the hw->speed_downgraded parameter. */
+ * and it is stored in the hw->speed_downgraded parameter.
+ */
phy_info->downshift = (e1000_downshift) hw->speed_downgraded;
ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -3574,8 +3613,8 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
}
if (eeprom->type == e1000_eeprom_spi) {
- /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
- * 32KB (incremented by powers of 2).
+ /* eeprom_size will be an enum [0..8] that maps to eeprom sizes
+ * 128B to 32KB (incremented by powers of 2).
*/
/* Set to default value for initial eeprom read. */
eeprom->word_size = 64;
@@ -3585,8 +3624,9 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
eeprom_size =
(eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
/* 256B eeprom size was not supported in earlier hardware, so we
- * bump eeprom_size up one to ensure that "1" (which maps to 256B)
- * is never the result used in the shifting logic below. */
+ * bump eeprom_size up one to ensure that "1" (which maps to
+ * 256B) is never the result used in the shifting logic below.
+ */
if (eeprom_size)
eeprom_size++;
@@ -3618,8 +3658,8 @@ static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd)
*/
static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd)
{
- /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
- * wait 50 microseconds.
+ /* Lower the clock input to the EEPROM (by clearing the SK bit), and
+ * then wait 50 microseconds.
*/
*eecd = *eecd & ~E1000_EECD_SK;
ew32(EECD, *eecd);
@@ -3651,10 +3691,11 @@ static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count)
eecd |= E1000_EECD_DO;
}
do {
- /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
- * and then raising and then lowering the clock (the SK bit controls
- * the clock input to the EEPROM). A "0" is shifted out to the EEPROM
- * by setting "DI" to "0" and then raising and then lowering the clock.
+ /* A "1" is shifted out to the EEPROM by setting bit "DI" to a
+ * "1", and then raising and then lowering the clock (the SK bit
+ * controls the clock input to the EEPROM). A "0" is shifted
+ * out to the EEPROM by setting "DI" to "0" and then raising and
+ * then lowering the clock.
*/
eecd &= ~E1000_EECD_DI;
@@ -3691,9 +3732,9 @@ static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count)
/* In order to read a register from the EEPROM, we need to shift 'count'
* bits in from the EEPROM. Bits are "shifted in" by raising the clock
- * input to the EEPROM (setting the SK bit), and then reading the value of
- * the "DO" bit. During this "shifting in" process the "DI" bit should
- * always be clear.
+ * input to the EEPROM (setting the SK bit), and then reading the value
+ * of the "DO" bit. During this "shifting in" process the "DI" bit
+ * should always be clear.
*/
eecd = er32(EECD);
@@ -3945,8 +3986,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -3964,7 +4005,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
return -E1000_ERR_EEPROM;
/* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
- * acquired the EEPROM at this point, so any returns should release it */
+ * acquired the EEPROM at this point, so any returns should release it
+ */
if (eeprom->type == e1000_eeprom_spi) {
u16 word_in;
u8 read_opcode = EEPROM_READ_OPCODE_SPI;
@@ -3976,7 +4018,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
read_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -3985,11 +4029,13 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset * 2),
eeprom->address_bits);
- /* Read the data. The address of the eeprom internally increments with
- * each byte (spi) being read, saving on the overhead of eeprom setup
- * and tear-down. The address counter will roll over if reading beyond
- * the size of the eeprom, thus allowing the entire memory to be read
- * starting from any offset. */
+ /* Read the data. The address of the eeprom internally
+ * increments with each byte (spi) being read, saving on the
+ * overhead of eeprom setup and tear-down. The address counter
+ * will roll over if reading beyond the size of the eeprom, thus
+ * allowing the entire memory to be read starting from any
+ * offset.
+ */
for (i = 0; i < words; i++) {
word_in = e1000_shift_in_ee_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
@@ -4003,8 +4049,9 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
e1000_shift_out_ee_bits(hw, (u16) (offset + i),
eeprom->address_bits);
- /* Read the data. For microwire, each word requires the overhead
- * of eeprom setup and tear-down. */
+ /* Read the data. For microwire, each word requires the
+ * overhead of eeprom setup and tear-down.
+ */
data[i] = e1000_shift_in_ee_bits(hw, 16);
e1000_standby_eeprom(hw);
}
@@ -4119,8 +4166,8 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
if (eeprom->word_size == 0)
e1000_init_eeprom_params(hw);
- /* A check for invalid values: offset too large, too many words, and not
- * enough words.
+ /* A check for invalid values: offset too large, too many words, and
+ * not enough words.
*/
if ((offset >= eeprom->word_size)
|| (words > eeprom->word_size - offset) || (words == 0)) {
@@ -4174,7 +4221,9 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
e1000_standby_eeprom(hw);
- /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+ /* Some SPI eeproms use the 8th address bit embedded in the
+ * opcode
+ */
if ((eeprom->address_bits == 8) && (offset >= 128))
write_opcode |= EEPROM_A8_OPCODE_SPI;
@@ -4186,16 +4235,19 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
/* Send the data */
- /* Loop to allow for up to whole page write (32 bytes) of eeprom */
+ /* Loop to allow for up to whole page write (32 bytes) of
+ * eeprom
+ */
while (widx < words) {
u16 word_out = data[widx];
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_ee_bits(hw, word_out, 16);
widx++;
- /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
- * operation, while the smaller eeproms are capable of an 8-byte
- * PAGE WRITE operation. Break the inner loop to pass new address
+ /* Some larger eeprom sizes are capable of a 32-byte
+ * PAGE WRITE operation, while the smaller eeproms are
+ * capable of an 8-byte PAGE WRITE operation. Break the
+ * inner loop to pass new address
*/
if ((((offset + widx) * 2) % eeprom->page_size) == 0) {
e1000_standby_eeprom(hw);
@@ -4249,14 +4301,15 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
/* Send the data */
e1000_shift_out_ee_bits(hw, data[words_written], 16);
- /* Toggle the CS line. This in effect tells the EEPROM to execute
- * the previous command.
+ /* Toggle the CS line. This in effect tells the EEPROM to
+ * execute the previous command.
*/
e1000_standby_eeprom(hw);
- /* Read DO repeatedly until it is high (equal to '1'). The EEPROM will
- * signal that the command has been completed by raising the DO signal.
- * If DO does not go high in 10 milliseconds, then error out.
+ /* Read DO repeatedly until it is high (equal to '1'). The
+ * EEPROM will signal that the command has been completed by
+ * raising the DO signal. If DO does not go high in 10
+ * milliseconds, then error out.
*/
for (i = 0; i < 200; i++) {
eecd = er32(EECD);
@@ -4483,7 +4536,8 @@ static void e1000_clear_vfta(struct e1000_hw *hw)
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of the
- * manageability unit */
+ * manageability unit
+ */
vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
E1000_WRITE_FLUSH();
@@ -4911,12 +4965,12 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* counters overcount this packet as a CRC error and undercount
* the packet as a good packet
*/
- /* This packet should not be counted as a CRC error. */
+ /* This packet should not be counted as a CRC error. */
stats->crcerrs--;
- /* This packet does count as a Good Packet Received. */
+ /* This packet does count as a Good Packet Received. */
stats->gprc++;
- /* Adjust the Good Octets received counters */
+ /* Adjust the Good Octets received counters */
carry_bit = 0x80000000 & stats->gorcl;
stats->gorcl += frame_len;
/* If the high bit of Gorcl (the low 32 bits of the Good Octets
@@ -5196,8 +5250,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
if (ret_val)
return ret_val;
- /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
- * find the polarity status */
+ /* If speed is 1000 Mbps, must read the
+ * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status
+ */
if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -5213,8 +5268,9 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
e1000_rev_polarity_reversed :
e1000_rev_polarity_normal;
} else {
- /* For 10 Mbps, read the polarity bit in the status register. (for
- * 100 Mbps this bit is always 0) */
+ /* For 10 Mbps, read the polarity bit in the status
+ * register. (for 100 Mbps this bit is always 0)
+ */
*polarity =
(phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ?
e1000_rev_polarity_reversed :
@@ -5374,8 +5430,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
} else {
if (hw->dsp_config_state == e1000_dsp_config_activated) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5391,7 +5448,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5412,7 +5469,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5429,8 +5486,9 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
}
if (hw->ffe_config_state == e1000_ffe_config_active) {
- /* Save off the current value of register 0x2F5B to be restored at
- * the end of the routines. */
+ /* Save off the current value of register 0x2F5B to be
+ * restored at the end of the routines.
+ */
ret_val =
e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
@@ -5446,7 +5504,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
msleep(20);
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_FORCE_GIGA);
+ IGP01E1000_IEEE_FORCE_GIGA);
if (ret_val)
return ret_val;
ret_val =
@@ -5456,7 +5514,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
return ret_val;
ret_val = e1000_write_phy_reg(hw, 0x0000,
- IGP01E1000_IEEE_RESTART_AUTONEG);
+ IGP01E1000_IEEE_RESTART_AUTONEG);
if (ret_val)
return ret_val;
@@ -5542,8 +5600,9 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return E1000_SUCCESS;
/* During driver activity LPLU should not be used or it will attain link
- * from the lowest speeds starting from 10Mbps. The capability is used for
- * Dx transitions and states */
+ * from the lowest speeds starting from 10Mbps. The capability is used
+ * for Dx transitions and states
+ */
if (hw->mac_type == e1000_82541_rev_2
|| hw->mac_type == e1000_82547_rev_2) {
ret_val =
@@ -5563,10 +5622,11 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
return ret_val;
}
- /* LPLU and SmartSpeed are mutually exclusive. LPLU is used during
- * Dx states where the power conservation is most important. During
- * driver activity we should enable SmartSpeed, so performance is
- * maintained. */
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
if (hw->smart_speed == e1000_smart_speed_on) {
ret_val =
e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
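
Note on the EEPROM hunks above: the comments they rewrap all describe the same bit-bang protocol -- present a bit on DI, raise and then lower the clock bit SK, and wait ~50 microseconds between transitions. As a minimal standalone sketch of that protocol (the bit positions and the eecd_read/eecd_write/delay_us helpers are hypothetical stand-ins, not the driver's interface):

#include <stdint.h>

#define EECD_SK 0x0001	/* clock to the EEPROM (assumed bit) */
#define EECD_DI 0x0004	/* data into the EEPROM (assumed bit) */

static uint32_t eecd;				/* stand-in register */
static uint32_t eecd_read(void) { return eecd; }
static void eecd_write(uint32_t v) { eecd = v; }
static void delay_us(unsigned int us) { (void)us; }

/* Shift 'count' bits of 'data' out MSB-first, one SK pulse per bit. */
static void shift_out_ee_bits(uint16_t data, uint16_t count)
{
	uint32_t v = eecd_read() & ~(EECD_SK | EECD_DI);
	uint16_t mask = 1u << (count - 1);

	while (mask) {
		if (data & mask)
			v |= EECD_DI;	/* a "1": DI high, then clock */
		else
			v &= ~EECD_DI;	/* a "0": DI low, then clock */
		eecd_write(v);
		delay_us(50);
		eecd_write(v | EECD_SK);	/* raise the clock */
		delay_us(50);
		eecd_write(v);			/* lower the clock */
		delay_us(50);
		mask >>= 1;
	}
}

Reading is the mirror image: with DI held low, pulse SK and sample DO after each rising edge, which is the pattern e1000_shift_in_ee_bits() implements.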
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index d947e3aae1e8..8502c625dbef 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -239,7 +239,6 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
* e1000_init_module is the first routine called when the driver is
* loaded. All it does is register with the PCI subsystem.
**/
-
static int __init e1000_init_module(void)
{
int ret;
@@ -266,7 +265,6 @@ module_init(e1000_init_module);
* e1000_exit_module is called just before the driver is removed
* from memory.
**/
-
static void __exit e1000_exit_module(void)
{
pci_unregister_driver(&e1000_driver);
@@ -301,7 +299,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
* e1000_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
**/
-
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -315,7 +312,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
* e1000_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -398,11 +394,12 @@ static void e1000_configure(struct e1000_adapter *adapter)
e1000_configure_rx(adapter);
/* call E1000_DESC_UNUSED which always leaves
* at least 1 descriptor unused to make sure
- * next_to_use != next_to_clean */
+ * next_to_use != next_to_clean
+ */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring,
- E1000_DESC_UNUSED(ring));
+ E1000_DESC_UNUSED(ring));
}
}
@@ -433,9 +430,7 @@ int e1000_up(struct e1000_adapter *adapter)
* The phy may be powered down to save power and turn off link when the
* driver is unloaded and wake on lan is not enabled (among others)
* *** this routine MUST be followed by a call to e1000_reset ***
- *
**/
-
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -444,7 +439,8 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
/* Just clear the power down bit to wake the phy back up */
if (hw->media_type == e1000_media_type_copper) {
/* according to the manual, the phy will retain its
- * settings across a power-down/up cycle */
+ * settings across a power-down/up cycle
+ */
e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
@@ -459,7 +455,8 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
* The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled
* (b) AMT is active
- * (c) SoL/IDER session is active */
+ * (c) SoL/IDER session is active
+ */
if (!adapter->wol && hw->mac_type >= e1000_82540 &&
hw->media_type == e1000_media_type_copper) {
u16 mii_reg = 0;
@@ -529,8 +526,7 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_irq_disable(adapter);
- /*
- * Setting DOWN must be after irq_disable to prevent
+ /* Setting DOWN must be after irq_disable to prevent
* a screaming interrupt. Setting DOWN also prevents
* tasks from rescheduling.
*/
@@ -627,14 +623,14 @@ void e1000_reset(struct e1000_adapter *adapter)
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
* one full receive packet and is similarly rounded up and
- * expressed in KB. */
+ * expressed in KB.
+ */
pba = er32(PBA);
/* upper 16 bits has Tx packet buffer allocation size in KB */
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /*
- * the tx fifo also stores 16 bytes of information about the tx
+ /* the Tx fifo also stores 16 bytes of information about the Tx
* but don't include ethernet FCS because hardware appends it
*/
min_tx_space = (hw->max_frame_size +
@@ -649,7 +645,8 @@ void e1000_reset(struct e1000_adapter *adapter)
/* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
- * allocation, take space away from current Rx allocation */
+ * allocation, take space away from current Rx allocation
+ */
if (tx_space < min_tx_space &&
((min_tx_space - tx_space) < pba)) {
pba = pba - (min_tx_space - tx_space);
@@ -663,8 +660,9 @@ void e1000_reset(struct e1000_adapter *adapter)
break;
}
- /* if short on rx space, rx wins and must trump tx
- * adjustment or use Early Receive if available */
+ /* if short on Rx space, Rx wins and must trump Tx
+ * adjustment or use Early Receive if available
+ */
if (pba < min_rx_space)
pba = min_rx_space;
}
@@ -672,8 +670,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
- /*
- * flow control settings:
+ /* flow control settings:
* The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO.
* Set it to the lower of:
@@ -707,7 +704,8 @@ void e1000_reset(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which
- * can cause a loss of link at power off or driver unload */
+ * can cause a loss of link at power off or driver unload
+ */
ctrl &= ~E1000_CTRL_SWDPIN3;
ew32(CTRL, ctrl);
}
@@ -808,9 +806,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
static netdev_features_t e1000_fix_features(struct net_device *netdev,
netdev_features_t features)
{
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable, make sure Tx flag is always in same state as Rx.
*/
if (features & NETIF_F_HW_VLAN_RX)
features |= NETIF_F_HW_VLAN_TX;
@@ -1012,16 +1009,14 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
- /*
- * there is a workaround being applied below that limits
+ /* there is a workaround being applied below that limits
* 64-bit DMA addresses to 64-bit hardware. There are some
* 32-bit adapters that Tx hang when given 64-bit DMA addresses
*/
pci_using_dac = 0;
if ((hw->bus_type == e1000_bus_type_pcix) &&
!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- /*
- * according to DMA-API-HOWTO, coherent calls will always
+ /* according to DMA-API-HOWTO, coherent calls will always
* succeed if the set call did
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
@@ -1099,7 +1094,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* before reading the EEPROM, reset the controller to
- * put the device in a known good starting state */
+ * put the device in a known good starting state
+ */
e1000_reset_hw(hw);
@@ -1107,8 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (e1000_validate_eeprom_checksum(hw) < 0) {
e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
- /*
- * set MAC address to all zeroes to invalidate and temporary
+ /* set MAC address to all zeroes to invalidate and temporarily
* disable this device for the user. This blocks regular
* traffic while still permitting ethtool ioctls from reaching
* the hardware as well as allowing the user to run the
@@ -1169,7 +1164,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
- * wake on lan on a particular port */
+ * wake on lan on a particular port
+ */
switch (pdev->device) {
case E1000_DEV_ID_82546GB_PCIE:
adapter->eeprom_wol = 0;
@@ -1177,7 +1173,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case E1000_DEV_ID_82546EB_FIBER:
case E1000_DEV_ID_82546GB_FIBER:
/* Wake events only supported on port A for dual fiber
- * regardless of eeprom setting */
+ * regardless of eeprom setting
+ */
if (er32(STATUS) & E1000_STATUS_FUNC_1)
adapter->eeprom_wol = 0;
break;
@@ -1270,7 +1267,6 @@ err_pci_reg:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-
static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -1306,7 +1302,6 @@ static void e1000_remove(struct pci_dev *pdev)
* e1000_sw_init initializes the Adapter private data structure.
* e1000_init_hw_struct MUST be called before this function
**/
-
static int e1000_sw_init(struct e1000_adapter *adapter)
{
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1337,7 +1332,6 @@ static int e1000_sw_init(struct e1000_adapter *adapter)
* We allocate one ring per queue at run-time since we don't know the
* number of queues at compile-time.
**/
-
static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
@@ -1367,7 +1361,6 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-
static int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1401,7 +1394,8 @@ static int e1000_open(struct net_device *netdev)
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
- * clean_rx handler before we do so. */
+ * clean_rx handler before we do so.
+ */
e1000_configure(adapter);
err = e1000_request_irq(adapter);
@@ -1444,7 +1438,6 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-
static int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1459,10 +1452,11 @@ static int e1000_close(struct net_device *netdev)
e1000_free_all_rx_resources(adapter);
/* kill manageability vlan ID if supported, but not if a vlan with
- * the same ID is registered on the host OS (let 8021q kill it) */
+ * the same ID is registered on the host OS (let 8021q kill it)
+ */
if ((hw->mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
- !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
+ !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
}
@@ -1483,7 +1477,8 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
unsigned long end = begin + len;
/* First rev 82545 and 82546 need to not allow any memory
- * write location to cross 64k boundary due to errata 23 */
+ * write location to cross 64k boundary due to errata 23
+ */
if (hw->mac_type == e1000_82545 ||
hw->mac_type == e1000_ce4100 ||
hw->mac_type == e1000_82546) {
@@ -1500,7 +1495,6 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
*
* Return 0 on success, negative on failure
**/
-
static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *txdr)
{
@@ -1574,7 +1568,6 @@ setup_tx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1599,7 +1592,6 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
*
* Configure the Tx unit of the MAC after a reset.
**/
-
static void e1000_configure_tx(struct e1000_adapter *adapter)
{
u64 tdba;
@@ -1620,8 +1612,10 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
ew32(TDT, 0);
ew32(TDH, 0);
- adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
- adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
+ adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDH : E1000_82542_TDH);
+ adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_TDT : E1000_82542_TDT);
break;
}
@@ -1676,7 +1670,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
adapter->txd_cmd |= E1000_TXD_CMD_RS;
/* Cache if we're 82544 running in PCI-X because we'll
- * need this to apply a workaround later in the send path. */
+ * need this to apply a workaround later in the send path.
+ */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
adapter->pcix_82544 = true;
@@ -1692,7 +1687,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rxdr)
{
@@ -1771,7 +1765,6 @@ setup_rx_desc_die:
*
* Return 0 on success, negative on failure
**/
-
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{
int i, err = 0;
@@ -1840,7 +1833,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -1862,7 +1856,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
*
* Configure the Rx unit of the MAC after a reset.
**/
-
static void e1000_configure_rx(struct e1000_adapter *adapter)
{
u64 rdba;
@@ -1895,7 +1888,8 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/* Setup the HW Rx Head and Tail Descriptor Pointers and
- * the Base and Length of the Rx Descriptor Ring */
+ * the Base and Length of the Rx Descriptor Ring
+ */
switch (adapter->num_rx_queues) {
case 1:
default:
@@ -1905,8 +1899,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
ew32(RDT, 0);
ew32(RDH, 0);
- adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
- adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
+ adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDH : E1000_82542_RDH);
+ adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
+ E1000_RDT : E1000_82542_RDT);
break;
}
@@ -1932,7 +1928,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
*
* Free all transmit software resources
**/
-
static void e1000_free_tx_resources(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -1955,7 +1950,6 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
*
* Free all transmit software resources
**/
-
void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -1990,7 +1984,6 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
* @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
-
static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@@ -2026,7 +2019,6 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_tx_rings - Free Tx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2042,7 +2034,6 @@ static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
*
* Free all receive software resources
**/
-
static void e1000_free_rx_resources(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2065,7 +2056,6 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
*
* Free all receive software resources
**/
-
void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{
int i;
@@ -2079,7 +2069,6 @@ void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
* @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
-
static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring)
{
@@ -2138,7 +2127,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
* e1000_clean_all_rx_rings - Free Rx Buffers for all queues
* @adapter: board private structure
**/
-
static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{
int i;
@@ -2198,7 +2186,6 @@ static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_set_mac(struct net_device *netdev, void *p)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2233,7 +2220,6 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
* responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-
static void e1000_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -2317,10 +2303,10 @@ static void e1000_set_rx_mode(struct net_device *netdev)
}
/* write the hash table completely, write from bottom to avoid
- * both stupid write combining chipsets, and flushing each write */
+ * both stupid write combining chipsets, and flushing each write
+ */
for (i = mta_reg_count - 1; i >= 0 ; i--) {
- /*
- * If we are on an 82544 has an errata where writing odd
+ /* The 82544 has an erratum where writing odd
* offsets overwrites the previous even offset, but writing
* backwards over the range solves the issue by always
* writing the odd offset first
@@ -2458,8 +2444,8 @@ static void e1000_watchdog(struct work_struct *work)
bool txb2b = true;
/* update snapshot of PHY registers on LSC */
e1000_get_speed_and_duplex(hw,
- &adapter->link_speed,
- &adapter->link_duplex);
+ &adapter->link_speed,
+ &adapter->link_duplex);
ctrl = er32(CTRL);
pr_info("%s NIC Link is Up %d Mbps %s, "
@@ -2533,7 +2519,8 @@ link_up:
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context). */
+ * (Do the reset outside of interrupt context).
+ */
adapter->tx_timeout_count++;
schedule_work(&adapter->reset_task);
/* exit immediately since reset is imminent */
@@ -2543,8 +2530,7 @@ link_up:
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000.
*/
@@ -2659,18 +2645,16 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
goto set_itr_now;
}
- adapter->tx_itr = e1000_update_itr(adapter,
- adapter->tx_itr,
- adapter->total_tx_packets,
- adapter->total_tx_bytes);
+ adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr,
+ adapter->total_tx_packets,
+ adapter->total_tx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
adapter->tx_itr = low_latency;
- adapter->rx_itr = e1000_update_itr(adapter,
- adapter->rx_itr,
- adapter->total_rx_packets,
- adapter->total_rx_bytes);
+ adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr,
+ adapter->total_rx_packets,
+ adapter->total_rx_bytes);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
adapter->rx_itr = low_latency;
@@ -2696,10 +2680,11 @@ set_itr_now:
if (new_itr != adapter->itr) {
/* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
- * increasing */
+ * increasing
+ */
new_itr = new_itr > adapter->itr ?
- min(adapter->itr + (new_itr >> 2), new_itr) :
- new_itr;
+ min(adapter->itr + (new_itr >> 2), new_itr) :
+ new_itr;
adapter->itr = new_itr;
ew32(ITR, 1000000000 / (new_itr * 256));
}
@@ -2861,7 +2846,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
/* Workaround for Controller erratum --
* descriptor for non-tso packet in a linear SKB that follows a
* tso gets written back prematurely before the data is fully
- * DMA'd to the controller */
+ * DMA'd to the controller
+ */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
tx_ring->last_tx_tso = false;
@@ -2869,7 +2855,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
}
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
+ * in TSO mode. Append 4-byte sentinel desc
+ */
if (unlikely(mss && !nr_frags && size == len && size > 8))
size -= 4;
/* work-around for errata 10 and it applies
@@ -2882,7 +2869,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
size = 2015;
/* Workaround for potential 82544 hang in PCI-X. Avoid
- * terminating buffers within evenly-aligned dwords. */
+ * terminating buffers within evenly-aligned dwords.
+ */
if (unlikely(adapter->pcix_82544 &&
!((unsigned long)(skb->data + offset + size - 1) & 4) &&
size > 4))
@@ -2894,7 +2882,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev,
skb->data + offset,
- size, DMA_TO_DEVICE);
+ size, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma))
goto dma_error;
buffer_info->next_to_watch = i;
@@ -2925,12 +2913,15 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i];
size = min(len, max_per_txd);
/* Workaround for premature desc write-backs
- * in TSO mode. Append 4-byte sentinel desc */
- if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+ * in TSO mode. Append 4-byte sentinel desc
+ */
+ if (unlikely(mss && f == (nr_frags-1) &&
+ size == len && size > 8))
size -= 4;
/* Workaround for potential 82544 hang in PCI-X.
* Avoid terminating buffers within evenly-aligned
- * dwords. */
+ * dwords.
+ */
bufend = (unsigned long)
page_to_phys(skb_frag_page(frag));
bufend += offset + size - 1;
@@ -2994,7 +2985,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
- E1000_TXD_CMD_TSE;
+ E1000_TXD_CMD_TSE;
txd_upper |= E1000_TXD_POPTS_TXSM << 8;
if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
@@ -3035,13 +3026,15 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
tx_ring->next_to_use = i;
writel(i, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to our tail
- * at a time, it syncronizes IO on IA64/Altix systems */
+ * at a time, it synchronizes IO on IA64/Altix systems
+ */
mmiowb();
}
@@ -3090,11 +3083,13 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
netif_stop_queue(netdev);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
- * but since that doesn't exist yet, just open code it. */
+ * but since that doesn't exist yet, just open code it.
+ */
smp_mb();
/* We need to check again in a case another CPU has just
- * made room available. */
+ * made room available.
+ */
if (likely(E1000_DESC_UNUSED(tx_ring) < size))
return -EBUSY;
@@ -3105,7 +3100,7 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
}
static int e1000_maybe_stop_tx(struct net_device *netdev,
- struct e1000_tx_ring *tx_ring, int size)
+ struct e1000_tx_ring *tx_ring, int size)
{
if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -3129,10 +3124,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
int tso;
unsigned int f;
- /* This goes back to the question of how to logically map a tx queue
+ /* This goes back to the question of how to logically map a Tx queue
* to a flow. Right now, performance is impacted slightly negatively
- * if using multiple tx queues. If the stack breaks away from a
- * single qdisc implementation, we can look at this again. */
+ * if using multiple Tx queues. If the stack breaks away from a
+ * single qdisc implementation, we can look at this again.
+ */
tx_ring = adapter->tx_ring;
if (unlikely(skb->len <= 0)) {
@@ -3157,7 +3153,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* initiating the DMA for each buffer. The calc is:
* 4 = ceil(buffer len/mss). To make sure we don't
* overrun the FIFO, adjust the max buffer len if mss
- * drops. */
+ * drops.
+ */
if (mss) {
u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
@@ -3173,8 +3170,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
* this hardware's requirements
* NOTE: this is a TSO only workaround
* if end byte alignment not correct move us
- * into the next dword */
- if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+ * into the next dword
+ */
+ if ((unsigned long)(skb_tail_pointer(skb) - 1)
+ & 4)
break;
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
@@ -3222,7 +3221,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count += nr_frags;
/* need: count + 2 desc gap to keep tail from touching
- * head, otherwise try next time */
+ * head, otherwise try next time
+ */
if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
return NETDEV_TX_BUSY;
@@ -3261,7 +3261,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS;
count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
- nr_frags, mss);
+ nr_frags, mss);
if (count) {
netdev_sent_queue(netdev, skb->len);
@@ -3363,9 +3363,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
/* Print Registers */
e1000_regdump(adapter);
- /*
- * transmit dump
- */
+ /* transmit dump */
pr_info("TX Desc ring0 dump\n");
/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
@@ -3426,9 +3424,7 @@ static void e1000_dump(struct e1000_adapter *adapter)
}
rx_ring_summary:
- /*
- * receive dump
- */
+ /* receive dump */
pr_info("\nRX Desc ring dump\n");
/* Legacy Receive Descriptor Format
@@ -3493,7 +3489,6 @@ exit:
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
**/
-
static void e1000_tx_timeout(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3521,7 +3516,6 @@ static void e1000_reset_task(struct work_struct *work)
* Returns the address of the device statistics structure.
* The statistics are actually updated from the watchdog.
**/
-
static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
{
/* only return the current stats */
@@ -3535,7 +3529,6 @@ static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
*
* Returns 0 on success, negative on failure
**/
-
static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3572,8 +3565,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
- * however with the new *_jumbo_rx* routines, jumbo receives will use
- * fragmented skbs */
+ * however with the new *_jumbo_rx* routines, jumbo receives will use
+ * fragmented skbs
+ */
if (max_frame <= E1000_RXBUFFER_2048)
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
@@ -3608,7 +3602,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
* e1000_update_stats - Update the board statistics counters
* @adapter: board private structure
**/
-
void e1000_update_stats(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3619,8 +3612,7 @@ void e1000_update_stats(struct e1000_adapter *adapter)
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@@ -3710,7 +3702,8 @@ void e1000_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */
/* RLEC on some newer hardware can be incorrect so build
- * our own version based on RUC and ROC */
+ * our own version based on RUC and ROC
+ */
netdev->stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.ruc + adapter->stats.roc +
@@ -3764,7 +3757,6 @@ void e1000_update_stats(struct e1000_adapter *adapter)
* @irq: interrupt number
* @data: pointer to a network interface device structure
**/
-
static irqreturn_t e1000_intr(int irq, void *data)
{
struct net_device *netdev = data;
@@ -3775,8 +3767,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (unlikely((!icr)))
return IRQ_NONE; /* Not our interrupt */
- /*
- * we might have caused the interrupt, but the above
+ /* we might have caused the interrupt, but the above
* read cleared it, and just in case the driver is
* down there is nothing to do so return handled
*/
@@ -3802,7 +3793,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
__napi_schedule(&adapter->napi);
} else {
/* this really should not happen! if it does it is basically a
- * bug, but not a hard error, so enable ints and continue */
+ * bug, but not a hard error, so enable ints and continue
+ */
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
@@ -3816,7 +3808,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
- struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
+ struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
+ napi);
int tx_clean_complete = 0, work_done = 0;
tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
@@ -3907,11 +3900,12 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
- * check with the clearing of time_stamp and movement of i */
+ * check with the clearing of time_stamp and movement of i
+ */
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
- (adapter->tx_timeout_factor * HZ)) &&
+ (adapter->tx_timeout_factor * HZ)) &&
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
@@ -3954,7 +3948,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @csum: receive descriptor csum field
* @sk_buff: socket buffer with received data
**/
-
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
{
@@ -3990,7 +3983,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
* e1000_consume_page - helper function
**/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
- u16 length)
+ u16 length)
{
bi->page = NULL;
skb->len += length;
@@ -4086,11 +4079,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
length, mapped);
spin_unlock_irqrestore(&adapter->stats_lock,
- irq_flags);
+ irq_flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4098,7 +4091,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* recycle both page and skb */
buffer_info->skb = skb;
/* an error means any chain goes out the window
- * too */
+ * too
+ */
if (rx_ring->rx_skb_top)
dev_kfree_skb(rx_ring->rx_skb_top);
rx_ring->rx_skb_top = NULL;
@@ -4114,7 +4108,7 @@ process_skb:
/* this is the beginning of a chain */
rxtop = skb;
skb_fill_page_desc(rxtop, 0, buffer_info->page,
- 0, length);
+ 0, length);
} else {
/* this is the middle of a chain */
skb_fill_page_desc(rxtop,
@@ -4132,38 +4126,42 @@ process_skb:
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
- * page */
+ * page
+ */
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
/* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
+ * copybreak to save the put_page/alloc_page
+ */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
vaddr = kmap_atomic(buffer_info->page);
- memcpy(skb_tail_pointer(skb), vaddr, length);
+ memcpy(skb_tail_pointer(skb), vaddr,
+ length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page */
+ * buffer_info->page
+ */
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
- buffer_info->page, 0,
- length);
+ buffer_info->page, 0,
+ length);
e1000_consume_page(buffer_info, skb,
- length);
+ length);
}
}
}
/* Receive Checksum Offload XXX recompute due to CRC strip? */
e1000_rx_checksum(adapter,
- (u32)(status) |
- ((u32)(rx_desc->errors) << 24),
- le16_to_cpu(rx_desc->csum), skb);
+ (u32)(status) |
+ ((u32)(rx_desc->errors) << 24),
+ le16_to_cpu(rx_desc->csum), skb);
total_rx_bytes += (skb->len - 4); /* don't count FCS */
if (likely(!(netdev->features & NETIF_F_RXFCS)))
@@ -4205,8 +4203,7 @@ next_desc:
return cleaned;
}
-/*
- * this should improve performance for small packets with large amounts
+/* this should improve performance for small packets with large amounts
* of reassembly being done in the stack
*/
static void e1000_check_copybreak(struct net_device *netdev,
@@ -4310,9 +4307,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
last_byte)) {
spin_lock_irqsave(&adapter->stats_lock, flags);
e1000_tbi_adjust_stats(hw, &adapter->stats,
- length, skb->data);
+ length, skb->data);
spin_unlock_irqrestore(&adapter->stats_lock,
- flags);
+ flags);
length--;
} else {
if (netdev->features & NETIF_F_RXALL)
@@ -4377,10 +4374,9 @@ next_desc:
* @rx_ring: pointer to receive ring structure
* @cleaned_count: number of buffers to allocate this pass
**/
-
static void
e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
- struct e1000_rx_ring *rx_ring, int cleaned_count)
+ struct e1000_rx_ring *rx_ring, int cleaned_count)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -4421,7 +4417,7 @@ check_page:
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
- buffer_info->page, 0,
+ buffer_info->page, 0,
buffer_info->length,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
@@ -4451,7 +4447,8 @@ check_page:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, adapter->hw.hw_addr + rx_ring->rdt);
}
@@ -4461,7 +4458,6 @@ check_page:
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
**/
-
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count)
@@ -4532,8 +4528,7 @@ map_skb:
break; /* while !buffer_info->skb */
}
- /*
- * XXX if it was allocated cleanly it will never map to a
+ /* XXX if it was allocated cleanly it will never map to a
* boundary crossing
*/
@@ -4571,7 +4566,8 @@ map_skb:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, hw->hw_addr + rx_ring->rdt);
}
@@ -4581,7 +4577,6 @@ map_skb:
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
* @adapter:
**/
-
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -4594,7 +4589,8 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
if (adapter->smartspeed == 0) {
/* If Master/Slave config fault is asserted twice,
- * we assume back-to-back */
+ * we assume back-to-back
+ */
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
@@ -4607,7 +4603,7 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
adapter->smartspeed++;
if (!e1000_phy_setup_autoneg(hw) &&
!e1000_read_phy_reg(hw, PHY_CTRL,
- &phy_ctrl)) {
+ &phy_ctrl)) {
phy_ctrl |= (MII_CR_AUTO_NEG_EN |
MII_CR_RESTART_AUTO_NEG);
e1000_write_phy_reg(hw, PHY_CTRL,
@@ -4638,7 +4634,6 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
* @ifreq:
* @cmd:
**/
-
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -4657,7 +4652,6 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* @ifreq:
* @cmd:
**/
-
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
{
@@ -4919,7 +4913,8 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
hw->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
@@ -5122,8 +5117,7 @@ static void e1000_shutdown(struct pci_dev *pdev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
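
On the ITR math in e1000_set_itr() above: new_itr is a target rate in interrupts per second, and the register written by ew32(ITR, 1000000000 / (new_itr * 256)) holds the minimum inter-interrupt gap in 256 ns units. A quick arithmetic check of that conversion (plain C, not driver code):

#include <stdio.h>

/* interrupts/sec -> ITR register value in 256 ns units */
static unsigned int itr_reg_from_rate(unsigned int ints_per_sec)
{
	return 1000000000u / (ints_per_sec * 256u);
}

int main(void)
{
	/* 8000 ints/sec -> 488 units; 2000 ints/sec -> 1953 units */
	printf("%u %u\n", itr_reg_from_rate(8000), itr_reg_from_rate(2000));
	return 0;
}

This is also why the set_itr_now path raises the rate through intermediate steps, adapter->itr + (new_itr >> 2), rather than jumping straight to the target: large rate jumps translate into large swings of the hardware interval.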
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 750fc0194f37..c9cde352b1c8 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -267,7 +267,6 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
-
void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -319,7 +318,8 @@ void e1000_check_options(struct e1000_adapter *adapter)
.def = E1000_DEFAULT_RXD,
.arg = { .r = {
.min = E1000_MIN_RXD,
- .max = mac_type < e1000_82544 ? E1000_MAX_RXD : E1000_MAX_82544_RXD
+ .max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+ E1000_MAX_82544_RXD
}}
};
@@ -408,7 +408,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_TxAbsIntDelay > bd) {
adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->tx_abs_int_delay = opt.def;
}
@@ -426,7 +426,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_int_delay = opt.def;
}
@@ -444,7 +444,7 @@ void e1000_check_options(struct e1000_adapter *adapter)
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
- adapter);
+ adapter);
} else {
adapter->rx_abs_int_delay = opt.def;
}
@@ -479,16 +479,17 @@ void e1000_check_options(struct e1000_adapter *adapter)
break;
case 4:
e_dev_info("%s set to simplified "
- "(2000-8000) ints mode\n", opt.name);
+ "(2000-8000) ints mode\n", opt.name);
adapter->itr_setting = adapter->itr;
break;
default:
e1000_validate_option(&adapter->itr, &opt,
- adapter);
+ adapter);
/* save the setting, because the dynamic bits
* change itr.
* clear the lower two bits because they are
- * used as control */
+ * used as control
+ */
adapter->itr_setting = adapter->itr & ~3;
break;
}
@@ -533,7 +534,6 @@ void e1000_check_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on fiber adapters
**/
-
static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
@@ -559,7 +559,6 @@ static void e1000_check_fiber_options(struct e1000_adapter *adapter)
*
* Handles speed and duplex options on copper adapters
**/
-
static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
@@ -681,22 +680,22 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at Half Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_100_HALF;
+ ADVERTISE_100_HALF;
break;
case FULL_DUPLEX:
e_dev_info("Full Duplex specified without Speed\n");
e_dev_info("Using Autonegotiation at Full Duplex only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
- ADVERTISE_100_FULL |
- ADVERTISE_1000_FULL;
+ ADVERTISE_100_FULL |
+ ADVERTISE_1000_FULL;
break;
case SPEED_10:
e_dev_info("10 Mbps Speed specified without Duplex\n");
e_dev_info("Using Autonegotiation at 10 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
- ADVERTISE_10_FULL;
+ ADVERTISE_10_FULL;
break;
case SPEED_10 + HALF_DUPLEX:
e_dev_info("Forcing to 10 Mbps Half Duplex\n");
@@ -715,7 +714,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter)
e_dev_info("Using Autonegotiation at 100 Mbps only\n");
adapter->hw.autoneg = adapter->fc_autoneg = 1;
adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
- ADVERTISE_100_FULL;
+ ADVERTISE_100_FULL;
break;
case SPEED_100 + HALF_DUPLEX:
e_dev_info("Forcing to 100 Mbps Half Duplex\n");
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3f7dbd1cdb0f..a177b8b65c44 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4830,6 +4830,13 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed,
&adapter->link_duplex);
e1000_print_link_info(adapter);
+
+ /* check if SmartSpeed worked */
+ e1000e_check_downshift(hw);
+ if (phy->speed_downgraded)
+ netdev_warn(netdev,
+ "Link Speed was downgraded by SmartSpeed\n");
+
/* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half
*/
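
The e1000e hunk above warns when SmartSpeed downshifted the link after it came up. On Marvell 88E1000-class PHYs this kind of check usually amounts to reading the PHY-specific status register and testing a downshift bit; a hedged sketch follows (the register offset and bit mask are assumptions for illustration, not taken from this patch):

#include <stdbool.h>
#include <stdint.h>

#define PHY_SPEC_STATUS	0x11	/* assumed register offset */
#define PSSR_DOWNSHIFT	0x0020	/* assumed downshift status bit */

/* read_reg is a caller-supplied MDIO read returning the register value */
static bool link_was_downshifted(uint16_t (*read_reg)(uint8_t reg))
{
	return (read_reg(PHY_SPEC_STATUS) & PSSR_DOWNSHIFT) != 0;
}

If the bit is set, the PHY linked below its advertised ceiling, which is the condition the new netdev_warn() reports via phy->speed_downgraded.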
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 40b5d568d808..a3830a8ba4c1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1891,7 +1891,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(&adapter->hw);
if (hw->mac.autoneg)
- msleep(4000);
+ msleep(5000);
if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
*data = 1;
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fdca7b672776..a1463e3d14c0 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -127,8 +127,8 @@ struct igbvf_buffer {
/* Tx */
struct {
unsigned long time_stamp;
+ union e1000_adv_tx_desc *next_to_watch;
u16 length;
- u16 next_to_watch;
u16 mapped_as_page;
};
/* Rx */
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f53f7136e508..d60cd4393415 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -797,20 +797,31 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
struct sk_buff *skb;
union e1000_adv_tx_desc *tx_desc, *eop_desc;
unsigned int total_bytes = 0, total_packets = 0;
- unsigned int i, eop, count = 0;
+ unsigned int i, count = 0;
bool cleaned = false;
i = tx_ring->next_to_clean;
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
+ buffer_info = &tx_ring->buffer_info[i];
+ eop_desc = buffer_info->next_to_watch;
+
+ do {
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ read_barrier_depends();
+
+ /* if DD is not set, pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ buffer_info->next_to_watch = NULL;
- while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
- (count < tx_ring->count)) {
- rmb(); /* read buffer_info after eop_desc status */
for (cleaned = false; !cleaned; count++) {
tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
- buffer_info = &tx_ring->buffer_info[i];
- cleaned = (i == eop);
+ cleaned = (tx_desc == eop_desc);
skb = buffer_info->skb;
if (skb) {
@@ -831,10 +842,12 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
i++;
if (i == tx_ring->count)
i = 0;
+
+ buffer_info = &tx_ring->buffer_info[i];
}
- eop = tx_ring->buffer_info[i].next_to_watch;
- eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
- }
+
+ eop_desc = buffer_info->next_to_watch;
+ } while (count < tx_ring->count);
tx_ring->next_to_clean = i;
@@ -1961,7 +1974,6 @@ static int igbvf_tso(struct igbvf_adapter *adapter,
context_desc->seqnum_seed = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2021,7 +2033,6 @@ static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
context_desc->mss_l4len_idx = 0;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->dma = 0;
i++;
if (i == tx_ring->count)
@@ -2061,8 +2072,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- struct sk_buff *skb,
- unsigned int first)
+ struct sk_buff *skb)
{
struct igbvf_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
@@ -2077,7 +2087,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
buffer_info->length = len;
/* set time_stamp *before* dma to help avoid a possible race */
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = false;
buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
DMA_TO_DEVICE);
@@ -2100,7 +2109,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
buffer_info->length = len;
buffer_info->time_stamp = jiffies;
- buffer_info->next_to_watch = i;
buffer_info->mapped_as_page = true;
buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
DMA_TO_DEVICE);
@@ -2109,7 +2117,6 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
}
tx_ring->buffer_info[i].skb = skb;
- tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
@@ -2120,7 +2127,6 @@ dma_error:
buffer_info->dma = 0;
buffer_info->time_stamp = 0;
buffer_info->length = 0;
- buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
if (count)
count--;
@@ -2139,7 +2145,8 @@ dma_error:
static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
- int tx_flags, int count, u32 paylen,
+ int tx_flags, int count,
+ unsigned int first, u32 paylen,
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc = NULL;
@@ -2189,6 +2196,7 @@ static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
* such as IA-64). */
wmb();
+ tx_ring->buffer_info[first].next_to_watch = tx_desc;
tx_ring->next_to_use = i;
writel(i, adapter->hw.hw_addr + tx_ring->tail);
/* we need this if more than one processor can write to our tail
@@ -2255,11 +2263,11 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
* count reflects descriptors mapped, if 0 then mapping error
* has occurred and we need to rewind the descriptor queue
*/
- count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
+ count = igbvf_tx_map_adv(adapter, tx_ring, skb);
if (count) {
igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
- skb->len, hdr_len);
+ first, skb->len, hdr_len);
/* Make sure there is space in the ring for the next send. */
igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
} else {
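
The igbvf rework above changes next_to_watch from a u16 ring index (set at map time) to a pointer to the EOP descriptor, published only after the descriptors and the wmb() in igbvf_tx_queue_adv(). The cleanup path can then treat NULL as "no work pending" and clear the field once the EOP completes, instead of re-reading a possibly stale index. A simplified sketch of the resulting test (types and the DD bit value are stand-ins):

#include <stdbool.h>
#include <stdint.h>

struct tx_desc   { uint32_t wb_status; };
struct tx_buffer { struct tx_desc *next_to_watch; };

#define TXD_STAT_DD 0x00000001	/* descriptor-done (assumed value) */

static bool tx_eop_completed(struct tx_buffer *buf)
{
	struct tx_desc *eop = buf->next_to_watch;

	if (!eop)				/* nothing queued here */
		return false;
	if (!(eop->wb_status & TXD_STAT_DD))	/* hardware not done yet */
		return false;
	buf->next_to_watch = NULL;		/* prevent false hang reports */
	return true;
}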
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index d268c7b222c7..a8e10cff7a89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -156,7 +156,7 @@ struct vf_macvlans {
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
@@ -201,6 +201,7 @@ struct ixgbe_rx_queue_stats {
enum ixgbe_ring_state_t {
__IXGBE_TX_FDIR_INIT_DONE,
+ __IXGBE_TX_XPS_INIT_DONE,
__IXGBE_TX_DETECT_HANG,
__IXGBE_HANG_CHECK_ARMED,
__IXGBE_RX_RSC_ENABLED,
@@ -278,15 +279,10 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
-#define IXGBE_MAX_FDIR_INDICES 64
-#ifdef IXGBE_FCOE
+#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
-#else
-#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
-#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
-#endif /* IXGBE_FCOE */
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -624,6 +620,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
+ __IXGBE_READ_I2C,
};
struct ixgbe_cb {
@@ -704,8 +701,8 @@ extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
-extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
+extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
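
Two sizing changes in ixgbe.h are worth spelling out. DESC_NEEDED drops its page-size term: with IXGBE_MAX_DATA_PER_TXD at its usual 16 KB, DIV_ROUND_UP(PAGE_SIZE, IXGBE_MAX_DATA_PER_TXD) is 1 on 4 KB pages, so old and new bounds agree there, while on 64 KB pages the old form reserved four descriptors per fragment. And IXGBE_MAX_FDIR_INDICES shrinks from 64 to 63 so the flow director queues plus one extra queue still fit the q_vector limit. A worked view, assuming the header values above:

	/* illustrative arithmetic from the ixgbe.h definitions above:
	 *
	 *   MAX_RX_QUEUES = IXGBE_MAX_FDIR_INDICES + 1 = 63 + 1 = 64
	 *   MAX_TX_QUEUES = IXGBE_MAX_FDIR_INDICES + 1 = 63 + 1 = 64
	 *
	 *   DESC_NEEDED (4 KB pages, 16 KB max data per descriptor):
	 *     old: MAX_SKB_FRAGS * DIV_ROUND_UP(4096, 16384) + 4
	 *        = MAX_SKB_FRAGS * 1 + 4
	 *     new: MAX_SKB_FRAGS + 4       (same value, simpler form)
	 */
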
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index db6735931d66..f4d2e9e3c6d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include "ixgbe.h"
+#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16
@@ -2112,13 +2113,17 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- u16 tx_itr_param, rx_itr_param;
+ u16 tx_itr_param, rx_itr_param, tx_itr_prev;
bool need_reset = false;
- /* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
- && ec->tx_coalesce_usecs)
- return -EINVAL;
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
+ /* reject Tx-specific changes in case of mixed Rx/Tx vectors */
+ if (ec->tx_coalesce_usecs)
+ return -EINVAL;
+ tx_itr_prev = adapter->rx_itr_setting;
+ } else {
+ tx_itr_prev = adapter->tx_itr_setting;
+ }
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
@@ -2144,8 +2149,25 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else
tx_itr_param = adapter->tx_itr_setting;
+ /* mixed Rx/Tx */
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
+ adapter->tx_itr_setting = adapter->rx_itr_setting;
+
+#if IS_ENABLED(CONFIG_BQL)
+ /* detect ITR changes that require update of TXDCTL.WTHRESH */
+ if ((adapter->tx_itr_setting > 1) &&
+ (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
+ if ((tx_itr_prev == 1) ||
+ (tx_itr_prev > IXGBE_100K_ITR))
+ need_reset = true;
+ } else {
+ if ((tx_itr_prev > 1) &&
+ (tx_itr_prev < IXGBE_100K_ITR))
+ need_reset = true;
+ }
+#endif
/* check the old value and enable RSC if necessary */
- need_reset = ixgbe_update_rsc(adapter);
+ need_reset |= ixgbe_update_rsc(adapter);
for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
@@ -2731,6 +2753,225 @@ static int ixgbe_get_ts_info(struct net_device *dev,
return 0;
}
+static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
+{
+ unsigned int max_combined;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ /* We only support one q_vector without MSI-X */
+ max_combined = 1;
+ } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ /* SR-IOV currently only allows one queue on the PF */
+ max_combined = 1;
+ } else if (tcs > 1) {
+ /* For DCB report channels per traffic class */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ /* 8 TC w/ 4 queues per TC */
+ max_combined = 4;
+ } else if (tcs > 4) {
+ /* 8 TC w/ 8 queues per TC */
+ max_combined = 8;
+ } else {
+ /* 4 TC w/ 16 queues per TC */
+ max_combined = 16;
+ }
+ } else if (adapter->atr_sample_rate) {
+ /* support up to 64 queues with ATR */
+ max_combined = IXGBE_MAX_FDIR_INDICES;
+ } else {
+ /* support up to 16 queues with RSS */
+ max_combined = IXGBE_MAX_RSS_INDICES;
+ }
+
+ return max_combined;
+}
+
+static void ixgbe_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ /* report maximum channels */
+ ch->max_combined = ixgbe_max_channels(adapter);
+
+ /* report info for other vector */
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ ch->max_other = NON_Q_VECTORS;
+ ch->other_count = NON_Q_VECTORS;
+ }
+
+ /* record RSS queues */
+ ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;
+
+ /* nothing else to report if RSS is disabled */
+ if (ch->combined_count == 1)
+ return;
+
+ /* we do not support ATR queueing if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ return;
+
+ /* same thing goes for being DCB enabled */
+ if (netdev_get_num_tc(dev) > 1)
+ return;
+
+ /* if ATR is disabled we can exit */
+ if (!adapter->atr_sample_rate)
+ return;
+
+ /* report flow director queues as maximum channels */
+ ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
+}
+
+static int ixgbe_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ unsigned int count = ch->combined_count;
+
+ /* verify they are not requesting separate vectors */
+ if (!count || ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ /* verify other_count has not changed */
+ if (ch->other_count != NON_Q_VECTORS)
+ return -EINVAL;
+
+ /* verify the number of channels does not exceed hardware limits */
+ if (count > ixgbe_max_channels(adapter))
+ return -EINVAL;
+
+ /* update feature limits from largest to smallest supported values */
+ adapter->ring_feature[RING_F_FDIR].limit = count;
+
+ /* cap RSS limit at 16 */
+ if (count > IXGBE_MAX_RSS_INDICES)
+ count = IXGBE_MAX_RSS_INDICES;
+ adapter->ring_feature[RING_F_RSS].limit = count;
+
+#ifdef IXGBE_FCOE
+ /* cap FCoE limit at 8 */
+ if (count > IXGBE_FCRETA_SIZE)
+ count = IXGBE_FCRETA_SIZE;
+ adapter->ring_feature[RING_F_FCOE].limit = count;
+
+#endif
+ /* use ixgbe_setup_tc() to update any traffic class queue mapping */
+ return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
+}
+
+static int ixgbe_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status;
+ u8 sff8472_rev, addr_mode;
+ int ret_val = 0;
+ bool page_swap = false;
+
+ /* avoid concurrent i2c reads */
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+
+ /* tell the service task to skip SFP detection while this is set */
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Check whether we support SFF-8472 or not */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_COMP,
+ &sff8472_rev);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ /* check the addressing mode; a page swap is not supported */
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_SFF_8472_SWAP,
+ &addr_mode);
+ if (status != 0) {
+ ret_val = -EIO;
+ goto err_out;
+ }
+
+ if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
+ e_err(drv, "Address change required to access page 0xA2, but that is not supported. Please report the module type to the driver maintainers.\n");
+ page_swap = true;
+ }
+
+ if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+ /* We have an SFP, but it does not support SFF-8472 */
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ /* We have an SFP that supports a revision of SFF-8472. */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+ return ret_val;
+}
+
+static int ixgbe_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u8 databyte = 0xFF;
+ int i = 0;
+ int ret_val = 0;
+
+ /* ixgbe_get_module_info is always called before this function,
+ * so we do not need to repeat the checks done there and can
+ * trust ee->len to be a known value.
+ */
+
+ while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ msleep(100);
+ set_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ /* Read the first block, SFF-8079 */
+ for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+
+ /* If the second block is requested, check if SFF-8472 is supported. */
+ if (ee->len == ETH_MODULE_SFF_8472_LEN) {
+ if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP) {
+ /* a bare return here would leave __IXGBE_READ_I2C set and
+ * stall the SFP service task; clear it via err_out */
+ ret_val = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ /* Read the second block, SFF-8472 */
+ for (i = ETH_MODULE_SFF_8079_LEN;
+ i < ETH_MODULE_SFF_8472_LEN; i++) {
+ status = hw->phy.ops.read_i2c_sff8472(hw,
+ i - ETH_MODULE_SFF_8079_LEN, &databyte);
+ if (status != 0) {
+ /* Error occurred while reading module */
+ ret_val = -EIO;
+ goto err_out;
+ }
+ data[i] = databyte;
+ }
+ }
+
+err_out:
+ clear_bit(__IXGBE_READ_I2C, &adapter->state);
+
+ return ret_val;
+}
+
static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_settings = ixgbe_get_settings,
.set_settings = ixgbe_set_settings,
@@ -2759,7 +3000,11 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_rxnfc = ixgbe_get_rxnfc,
.set_rxnfc = ixgbe_set_rxnfc,
+ .get_channels = ixgbe_get_channels,
+ .set_channels = ixgbe_set_channels,
.get_ts_info = ixgbe_get_ts_info,
+ .get_module_info = ixgbe_get_module_info,
+ .get_module_eeprom = ixgbe_get_module_eeprom,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
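
With .get_channels and .set_channels wired into ixgbe_ethtool_ops, queue counts become tunable at runtime through the standard channels ioctl (ethtool -l / ethtool -L in userspace). A minimal standalone sketch of the query path; the device name is illustrative:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* illustrative */
		ifr.ifr_data = (void *)&ch;

		/* dispatches to ixgbe_get_channels() for an ixgbe netdev */
		if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
			printf("combined %u (max %u)\n",
			       ch.combined_count, ch.max_combined);
		close(fd);
		return 0;
	}

Setting channels (ethtool -L eth0 combined 8) lands in ixgbe_set_channels(), which caps the request against ixgbe_max_channels() and re-runs ixgbe_setup_tc() to rebuild the queue mapping.
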
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 9ffa3309e54d..ef5f7a678ce1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -386,7 +386,6 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
fcoe = &adapter->ring_feature[RING_F_FCOE];
/* limit ourselves based on feature limits */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (fcoe_i) {
@@ -562,9 +561,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
if (vmdq_i > 1 && fcoe_i) {
- /* reserve no more than number of CPUs */
- fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
-
/* alloc queues for FCoE separately */
fcoe->indices = fcoe_i;
fcoe->offset = vmdq_i * rss_i;
@@ -623,8 +619,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
if (rss_i > 1 && adapter->atr_sample_rate) {
f = &adapter->ring_feature[RING_F_FDIR];
- f->indices = min_t(u16, num_online_cpus(), f->limit);
- rss_i = max_t(u16, rss_i, f->indices);
+ rss_i = f->indices = f->limit;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -776,19 +771,23 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
- int node = -1;
+ int node = NUMA_NO_NODE;
int cpu = -1;
int ring_count, size;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- if (cpu_online(v_idx)) {
- cpu = v_idx;
- node = cpu_to_node(cpu);
+ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ if (cpu_online(v_idx)) {
+ cpu = v_idx;
+ node = cpu_to_node(cpu);
+ }
}
}
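
The q_vector allocation hunk does two things: node now defaults to the canonical NUMA_NO_NODE instead of a bare -1, and a CPU/node is pinned only when ATR can actually be used (one traffic class, no SR-IOV, more than one RSS queue). The allocation this feeds is the standard node-aware pattern; a minimal sketch, assuming the size and node values computed in ixgbe_alloc_q_vector():

	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);	/* fall back to any node */
	if (!q_vector)
		return -ENOMEM;
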
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 1d5e093e988a..68478d6dfa2d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2786,13 +2786,19 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/*
* set WTHRESH to encourage burst writeback, it should not be set
- * higher than 1 when ITR is 0 as it could cause false TX hangs
+ * higher than 1 when:
+ * - ITR is 0 as it could cause false TX hangs
+ * - ITR is set to > 100k int/sec and BQL is enabled
*
* In order to avoid issues WTHRESH + PTHRESH should always be equal
* to or less than the number of on chip descriptors, which is
* currently 40.
*/
+#if IS_ENABLED(CONFIG_BQL)
+ if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
+#else
if (!ring->q_vector || (ring->q_vector->itr < 8))
+#endif
txdctl |= (1 << 16); /* WTHRESH = 1 */
else
txdctl |= (8 << 16); /* WTHRESH = 8 */
@@ -2813,6 +2819,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
ring->atr_sample_rate = 0;
}
+ /* initialize XPS */
+ if (!test_and_set_bit(__IXGBE_TX_XPS_INIT_DONE, &ring->state)) {
+ struct ixgbe_q_vector *q_vector = ring->q_vector;
+
+ if (q_vector)
+ netif_set_xps_queue(adapter->netdev,
+ &q_vector->affinity_mask,
+ ring->queue_index);
+ }
+
clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
/* enable queue */
@@ -4465,7 +4481,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- unsigned int rss;
+ unsigned int rss, fdir;
u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -4485,9 +4501,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->ring_feature[RING_F_RSS].limit = rss;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- adapter->ring_feature[RING_F_FDIR].limit = IXGBE_MAX_FDIR_INDICES;
adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->atr_sample_rate = 20;
+ fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus());
+ adapter->ring_feature[RING_F_FDIR].limit = fdir;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
@@ -5698,6 +5715,10 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
+ /* concurrent i2c reads are not supported */
+ if (test_bit(__IXGBE_READ_I2C, &adapter->state))
+ return;
+
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@@ -6363,38 +6384,40 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
+#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
- int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
- smp_processor_id();
-#ifdef IXGBE_FCOE
- __be16 protocol = vlan_get_protocol(skb);
+ struct ixgbe_adapter *adapter;
+ struct ixgbe_ring_feature *f;
+ int txq;
- if (((protocol == htons(ETH_P_FCOE)) ||
- (protocol == htons(ETH_P_FIP))) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- struct ixgbe_ring_feature *f;
+ /*
+ * only execute the code below if protocol is FCoE
+ * or FIP and we have FCoE enabled on the adapter
+ */
+ switch (vlan_get_protocol(skb)) {
+ case __constant_htons(ETH_P_FCOE):
+ case __constant_htons(ETH_P_FIP):
+ adapter = netdev_priv(dev);
- f = &adapter->ring_feature[RING_F_FCOE];
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
+ break;
+ default:
+ return __netdev_pick_tx(dev, skb);
+ }
- while (txq >= f->indices)
- txq -= f->indices;
- txq += adapter->ring_feature[RING_F_FCOE].offset;
+ f = &adapter->ring_feature[RING_F_FCOE];
- return txq;
- }
-#endif
+ txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- while (unlikely(txq >= dev->real_num_tx_queues))
- txq -= dev->real_num_tx_queues;
- return txq;
- }
+ while (txq >= f->indices)
+ txq -= f->indices;
- return skb_tx_hash(dev, skb);
+ return txq + f->offset;
}
+#endif
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
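
The rewritten ixgbe_select_queue() is now FCoE-only (and, as the ndo_select_queue hunk below shows, compiled out entirely otherwise): anything that is not FCoE/FIP traffic falls through to the stock __netdev_pick_tx(). The __constant_htons() form appears because switch case labels must be compile-time constants. The loop that folds the recorded Rx queue into the FCoE region is just a modulo plus offset; a compact restatement of that arithmetic (hypothetical helper, same result as the while loop above):

	static u16 fcoe_pick_queue(u16 hint, u16 indices, u16 offset)
	{
		/* wrap the hint into [0, indices) and shift it to the
		 * FCoE ring region starting at 'offset' */
		return (hint % indices) + offset;
	}
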
@@ -6799,6 +6822,7 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
}
}
+#endif /* CONFIG_IXGBE_DCB */
/**
* ixgbe_setup_tc - configure net_device for multiple traffic classes
*
@@ -6824,6 +6848,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
ixgbe_close(dev);
ixgbe_clear_interrupt_scheme(adapter);
+#ifdef CONFIG_IXGBE_DCB
if (tc) {
netdev_set_num_tc(dev, tc);
ixgbe_set_prio_tc_map(adapter);
@@ -6846,31 +6871,24 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->dcb_cfg.pfc_mode_enable = false;
}
- ixgbe_init_interrupt_scheme(adapter);
ixgbe_validate_rtr(adapter, tc);
+
+#endif /* CONFIG_IXGBE_DCB */
+ ixgbe_init_interrupt_scheme(adapter);
+
if (netif_running(dev))
- ixgbe_open(dev);
+ return ixgbe_open(dev);
return 0;
}
-#endif /* CONFIG_IXGBE_DCB */
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
rtnl_lock();
-#ifdef CONFIG_IXGBE_DCB
ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
-#else
- if (netif_running(netdev))
- ixgbe_close(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- ixgbe_open(netdev);
-#endif
rtnl_unlock();
}
@@ -7118,7 +7136,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame,
+#ifdef IXGBE_FCOE
.ndo_select_queue = ixgbe_select_queue,
+#endif
.ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac,
@@ -7230,9 +7250,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
- unsigned int indices = num_possible_cpus();
- unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7281,25 +7300,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_save_state(pdev);
+ if (ii->mac == ixgbe_mac_82598EB) {
#ifdef CONFIG_IXGBE_DCB
- if (ii->mac == ixgbe_mac_82598EB)
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_RSS_INDICES);
- else
- dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
- IXGBE_MAX_FDIR_INDICES);
+ /* 8 TC w/ 4 queues per TC */
+ indices = 4 * MAX_TRAFFIC_CLASS;
+#else
+ indices = IXGBE_MAX_RSS_INDICES;
#endif
+ }
- if (ii->mac == ixgbe_mac_82598EB)
- indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
- else
- indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-
-#ifdef IXGBE_FCOE
- indices += min_t(unsigned int, num_possible_cpus(),
- IXGBE_MAX_FCOE_INDICES);
-#endif
- indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@@ -7454,13 +7463,17 @@ skip_sriov:
#ifdef IXGBE_FCOE
if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
+ unsigned int fcoe_l;
+
if (hw->mac.ops.get_device_caps) {
hw->mac.ops.get_device_caps(hw, &device_caps);
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
- adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+ fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus());
+ adapter->ring_feature[RING_F_FCOE].limit = fcoe_l;
netdev->features |= NETIF_F_FSO |
NETIF_F_FCOE_CRC;
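
ixgbe_probe() now sizes the netdev from fixed hardware limits rather than num_possible_cpus(), and caps the FCoE redirection-table limit by the online CPU count. The resulting queue counts, assuming MAX_TRAFFIC_CLASS == 8 and the ixgbe.h values above:

	/* illustrative results of the probe-time sizing above:
	 *
	 *   default:        indices = MAX_TX_QUEUES = 63 + 1 = 64
	 *   82598 w/ DCB:   indices = 4 * MAX_TRAFFIC_CLASS = 32
	 *   82598 w/o DCB:  indices = IXGBE_MAX_RSS_INDICES = 16
	 *
	 *   FCoE limit      = min(IXGBE_FCRETA_SIZE, num_online_cpus())
	 */
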
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index eb534a071fde..060d2ad2ac96 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -852,11 +852,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_IDENTIFIER,
- &identifier);
+ &identifier);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* LAN ID is needed for sfp_type determination */
@@ -870,26 +868,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_1GBE_COMP_CODES,
&comp_codes_1g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_10GBE_COMP_CODES,
&comp_codes_10g);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_CABLE_TECHNOLOGY,
&cable_tech);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
/* ID Module
@@ -984,30 +976,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (hw->phy.type != ixgbe_phy_nl) {
hw->phy.id = identifier;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE0,
- &oui_bytes[0]);
+ IXGBE_SFF_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE1,
&oui_bytes[1]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_VENDOR_OUI_BYTE2,
&oui_bytes[2]);
- if (status == IXGBE_ERR_SWFW_SYNC ||
- status == IXGBE_ERR_I2C ||
- status == IXGBE_ERR_SFP_NOT_PRESENT)
+ if (status != 0)
goto err_read_i2c_eeprom;
vendor_oui =
@@ -1307,9 +1293,9 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
+ ixgbe_i2c_bus_clear(hw);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msleep(100);
- ixgbe_i2c_bus_clear(hw);
retry++;
if (retry < max_retry)
hw_dbg(hw, "I2C byte read error - Retrying.\n");
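
The last hunk is an ordering fix: on a failed byte read, the I2C bus is now cleared *before* the SWFW semaphore is released, so recovery happens while the driver still owns the bus and firmware cannot start a transaction against a half-recovered bus. The general shape of the fix, as a sketch with hypothetical helper names:

	acquire_swfw_sync(hw, mask);
	ret = i2c_transfer_attempt(hw);
	if (ret)
		i2c_bus_recover(hw);	/* still holding the semaphore */
	release_swfw_sync(hw, mask);
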