Diffstat (limited to 'drivers/net/ethernet/broadcom')
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig                2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm4908_enet.c       146
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c     3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c          108
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h            9
5 files changed, 194 insertions, 74 deletions
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f8a168b73307..cb88ffb8f12f 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -54,7 +54,7 @@ config B44_PCI
config BCM4908_ENET
tristate "Broadcom BCM4908 internal mac support"
depends on ARCH_BCM4908 || COMPILE_TEST
- default y
+ default y if ARCH_BCM4908
help
This driver supports Ethernet controller integrated into Broadcom
BCM4908 family SoCs.
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 98cf82dea3e4..cbfed1d1477b 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -53,6 +54,7 @@ struct bcm4908_enet_dma_ring {
int length;
u16 cfg_block;
u16 st_ram_block;
+ struct napi_struct napi;
union {
void *cpu_addr;
@@ -66,8 +68,8 @@ struct bcm4908_enet_dma_ring {
struct bcm4908_enet {
struct device *dev;
struct net_device *netdev;
- struct napi_struct napi;
void __iomem *base;
+ int irq_tx;
struct bcm4908_enet_dma_ring tx_ring;
struct bcm4908_enet_dma_ring rx_ring;
@@ -122,24 +124,31 @@ static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
* Helpers
*/
-static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
+static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
{
- enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+ enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
}
-static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
+/***
+ * DMA ring ops
+ */
+
+static void bcm4908_enet_dma_ring_intrs_on(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
{
- enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
}
-static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
+static void bcm4908_enet_dma_ring_intrs_off(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
{
- enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
}
-static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
+static void bcm4908_enet_dma_ring_intrs_ack(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
{
- enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
}
/***
@@ -413,11 +422,14 @@ static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
{
struct bcm4908_enet *enet = dev_id;
+ struct bcm4908_enet_dma_ring *ring;
- bcm4908_enet_intrs_off(enet);
- bcm4908_enet_intrs_ack(enet);
+ ring = (irq == enet->irq_tx) ? &enet->tx_ring : &enet->rx_ring;
- napi_schedule(&enet->napi);
+ bcm4908_enet_dma_ring_intrs_off(enet, ring);
+ bcm4908_enet_dma_ring_intrs_ack(enet, ring);
+
+ napi_schedule(&ring->napi);
return IRQ_HANDLED;
}
@@ -425,6 +437,8 @@ static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
static int bcm4908_enet_open(struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
struct device *dev = enet->dev;
int err;
@@ -434,6 +448,17 @@ static int bcm4908_enet_open(struct net_device *netdev)
return err;
}
+ if (enet->irq_tx > 0) {
+ err = request_irq(enet->irq_tx, bcm4908_enet_irq_handler, 0,
+ "tx", enet);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n",
+ enet->irq_tx, err);
+ free_irq(netdev->irq, enet);
+ return err;
+ }
+ }
+
bcm4908_enet_gmac_init(enet);
bcm4908_enet_dma_reset(enet);
bcm4908_enet_dma_init(enet);
@@ -442,14 +467,19 @@ static int bcm4908_enet_open(struct net_device *netdev)
enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
- bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
- napi_enable(&enet->napi);
+ if (enet->irq_tx > 0) {
+ napi_enable(&tx_ring->napi);
+ bcm4908_enet_dma_ring_intrs_ack(enet, tx_ring);
+ bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
+ }
+
+ bcm4908_enet_dma_rx_ring_enable(enet, rx_ring);
+ napi_enable(&rx_ring->napi);
netif_carrier_on(netdev);
netif_start_queue(netdev);
-
- bcm4908_enet_intrs_ack(enet);
- bcm4908_enet_intrs_on(enet);
+ bcm4908_enet_dma_ring_intrs_ack(enet, rx_ring);
+ bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
return 0;
}
@@ -457,16 +487,20 @@ static int bcm4908_enet_open(struct net_device *netdev)
static int bcm4908_enet_stop(struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
netif_stop_queue(netdev);
netif_carrier_off(netdev);
- napi_disable(&enet->napi);
+ napi_disable(&rx_ring->napi);
+ napi_disable(&tx_ring->napi);
bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
bcm4908_enet_dma_uninit(enet);
+ free_irq(enet->irq_tx, enet);
free_irq(enet->netdev->irq, enet);
return 0;
@@ -483,25 +517,19 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
u32 tmp;
/* Free transmitted skbs */
- while (ring->read_idx != ring->write_idx) {
- buf_desc = &ring->buf_desc[ring->read_idx];
- if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
- break;
- slot = &ring->slots[ring->read_idx];
-
- dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
- dev_kfree_skb(slot->skb);
- if (++ring->read_idx == ring->length)
- ring->read_idx = 0;
- }
+ if (enet->irq_tx < 0 &&
+ !(le32_to_cpu(ring->buf_desc[ring->read_idx].ctl) & DMA_CTL_STATUS_OWN))
+ napi_schedule(&enet->tx_ring.napi);
/* Don't use the last empty buf descriptor */
if (ring->read_idx <= ring->write_idx)
free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
else
free_buf_descs = ring->read_idx - ring->write_idx;
- if (free_buf_descs < 2)
+ if (free_buf_descs < 2) {
+ netif_stop_queue(netdev);
return NETDEV_TX_BUSY;
+ }
/* Hardware removes OWN bit after sending data */
buf_desc = &ring->buf_desc[ring->write_idx];
@@ -538,9 +566,10 @@ static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netde
return NETDEV_TX_OK;
}
-static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
+static int bcm4908_enet_poll_rx(struct napi_struct *napi, int weight)
{
- struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
+ struct bcm4908_enet_dma_ring *rx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
+ struct bcm4908_enet *enet = container_of(rx_ring, struct bcm4908_enet, rx_ring);
struct device *dev = enet->dev;
int handled = 0;
@@ -589,7 +618,7 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
if (handled < weight) {
napi_complete_done(napi, handled);
- bcm4908_enet_intrs_on(enet);
+ bcm4908_enet_dma_ring_intrs_on(enet, rx_ring);
}
/* Hardware could disable ring if it run out of descriptors */
@@ -598,6 +627,42 @@ static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
return handled;
}
+static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight)
+{
+ struct bcm4908_enet_dma_ring *tx_ring = container_of(napi, struct bcm4908_enet_dma_ring, napi);
+ struct bcm4908_enet *enet = container_of(tx_ring, struct bcm4908_enet, tx_ring);
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ struct bcm4908_enet_dma_ring_slot *slot;
+ struct device *dev = enet->dev;
+ unsigned int bytes = 0;
+ int handled = 0;
+
+ while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) {
+ buf_desc = &tx_ring->buf_desc[tx_ring->read_idx];
+ if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+ break;
+ slot = &tx_ring->slots[tx_ring->read_idx];
+
+ dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+ dev_kfree_skb(slot->skb);
+ bytes += slot->len;
+ if (++tx_ring->read_idx == tx_ring->length)
+ tx_ring->read_idx = 0;
+
+ handled++;
+ }
+
+ if (handled < weight) {
+ napi_complete_done(napi, handled);
+ bcm4908_enet_dma_ring_intrs_on(enet, tx_ring);
+ }
+
+ if (netif_queue_stopped(enet->netdev))
+ netif_wake_queue(enet->netdev);
+
+ return handled;
+}
+
static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
@@ -620,6 +685,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *netdev;
struct bcm4908_enet *enet;
+ const u8 *mac;
int err;
netdev = devm_alloc_etherdev(dev, sizeof(*enet));
@@ -640,6 +706,8 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
if (netdev->irq < 0)
return netdev->irq;
+ enet->irq_tx = platform_get_irq_byname(pdev, "tx");
+
dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
err = bcm4908_enet_dma_alloc(enet);
@@ -647,12 +715,17 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
return err;
SET_NETDEV_DEV(netdev, &pdev->dev);
- eth_hw_addr_random(netdev);
+ mac = of_get_mac_address(dev->of_node);
+ if (!IS_ERR(mac))
+ ether_addr_copy(netdev->dev_addr, mac);
+ else
+ eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
netdev->min_mtu = ETH_ZLEN;
netdev->mtu = ETH_DATA_LEN;
netdev->max_mtu = ENET_MTU_MAX;
- netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);
+ netif_tx_napi_add(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
err = register_netdev(netdev);
if (err) {
@@ -670,7 +743,8 @@ static int bcm4908_enet_remove(struct platform_device *pdev)
struct bcm4908_enet *enet = platform_get_drvdata(pdev);
unregister_netdev(enet->netdev);
- netif_napi_del(&enet->napi);
+ netif_napi_del(&enet->rx_ring.napi);
+ netif_napi_del(&enet->tx_ring.napi);
bcm4908_enet_dma_free(enet);
return 0;
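
The bcm4908_enet changes above hinge on embedding a napi_struct in each DMA ring and recovering first the ring and then the device from the NAPI pointer with container_of(), as bcm4908_enet_poll_rx()/bcm4908_enet_poll_tx() do. The standalone sketch below only illustrates that pointer arithmetic outside the kernel: the structure and function names loosely mirror the driver, and container_of() is re-declared locally so the example compiles on its own.

#include <stddef.h>
#include <stdio.h>

/* Local re-declaration of the kernel's container_of() for a userspace demo. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_struct { int weight; };	/* stand-in for the kernel type */

struct demo_dma_ring {			/* loosely mirrors bcm4908_enet_dma_ring */
	int length;
	struct napi_struct napi;	/* one NAPI context per ring */
};

struct demo_enet {			/* loosely mirrors struct bcm4908_enet */
	struct demo_dma_ring tx_ring;
	struct demo_dma_ring rx_ring;
};

/* Given only the napi pointer (all a poll callback gets), recover ring and device. */
static void demo_poll_rx(struct napi_struct *napi)
{
	struct demo_dma_ring *rx_ring = container_of(napi, struct demo_dma_ring, napi);
	struct demo_enet *enet = container_of(rx_ring, struct demo_enet, rx_ring);

	printf("rx ring length %d, tx ring length %d\n",
	       rx_ring->length, enet->tx_ring.length);
}

int main(void)
{
	struct demo_enet enet = {
		.tx_ring = { .length = 128 },
		.rx_ring = { .length = 256 },
	};

	demo_poll_rx(&enet.rx_ring.napi);	/* prints: rx ring length 256, tx ring length 128 */
	return 0;
}
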
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b652ed72a621..56801387591d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1395,7 +1395,6 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
u32 op_gen_command = 0;
u32 comp_addr = BAR_CSTRORM_INTMEM +
CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
- int ret = 0;
if (REG_RD(bp, comp_addr)) {
BNX2X_ERR("Cleanup complete was not 0 before sending\n");
@@ -1420,7 +1419,7 @@ int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
/* Zero completion for next FLR */
REG_WR(bp, comp_addr, 0);
- return ret;
+ return 0;
}
u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index b53a0d87371a..6f13642121c4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4470,7 +4470,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
writel(1, bp->bar0 + doorbell_offset);
if (!pci_is_enabled(bp->pdev))
- return 0;
+ return -ENODEV;
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
@@ -4500,12 +4500,15 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
return -EBUSY;
/* on first few passes, just barely sleep */
- if (i < HWRM_SHORT_TIMEOUT_COUNTER)
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
- else
+ } else {
+ if (HWRM_WAIT_MUST_ABORT(bp, req))
+ break;
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
+ }
}
if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
@@ -4530,15 +4533,19 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (len)
break;
/* on first few passes, just barely sleep */
- if (i < HWRM_SHORT_TIMEOUT_COUNTER)
+ if (i < HWRM_SHORT_TIMEOUT_COUNTER) {
usleep_range(HWRM_SHORT_MIN_TIMEOUT,
HWRM_SHORT_MAX_TIMEOUT);
- else
+ } else {
+ if (HWRM_WAIT_MUST_ABORT(bp, req))
+ goto timeout_abort;
usleep_range(HWRM_MIN_TIMEOUT,
HWRM_MAX_TIMEOUT);
+ }
}
if (i >= tmo_count) {
+timeout_abort:
if (!silent)
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
HWRM_TOTAL_TIMEOUT(i),
@@ -7540,6 +7547,32 @@ static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
BNXT_FW_HEALTH_WIN_MAP_OFF);
}
+bool bnxt_is_fw_healthy(struct bnxt *bp)
+{
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ u32 fw_status;
+
+ fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (fw_status && !BNXT_FW_IS_HEALTHY(fw_status))
+ return false;
+ }
+
+ return true;
+}
+
+static void bnxt_inv_fw_health_reg(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg_type;
+
+ if (!fw_health || !fw_health->status_reliable)
+ return;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->status_reliable = false;
+}
+
static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
{
void __iomem *hs;
@@ -7547,6 +7580,9 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
u32 reg_type;
u32 sig;
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+
__bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
@@ -7558,11 +7594,9 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
BNXT_FW_HEALTH_WIN_BASE +
BNXT_GRC_REG_CHIP_NUM);
}
- if (!BNXT_CHIP_P5(bp)) {
- if (bp->fw_health)
- bp->fw_health->status_reliable = false;
+ if (!BNXT_CHIP_P5(bp))
return;
- }
+
status_loc = BNXT_GRC_REG_STATUS_P5 |
BNXT_FW_HEALTH_REG_TYPE_BAR0;
} else {
@@ -7592,6 +7626,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
u32 reg_base = 0xffffffff;
int i;
+ bp->fw_health->status_reliable = false;
/* Only pre-map the monitoring GRC registers using window 3 */
for (i = 0; i < 4; i++) {
u32 reg = fw_health->regs[i];
@@ -7604,6 +7639,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
return -ERANGE;
fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
+ bp->fw_health->status_reliable = true;
if (reg_base == 0xffffffff)
return 0;
@@ -9494,9 +9530,10 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
mutex_lock(&bp->hwrm_cmd_lock);
do {
- rc = __bnxt_hwrm_ver_get(bp, true);
sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- if (!sts || !BNXT_FW_IS_BOOTING(sts))
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ if (!sts || (!BNXT_FW_IS_BOOTING(sts) &&
+ !BNXT_FW_IS_RECOVERING(sts)))
break;
retry++;
} while (rc == -EBUSY && retry < BNXT_FW_RETRY);
@@ -9556,13 +9593,17 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (rc)
return rc;
- if (!up)
+ if (!up) {
+ bnxt_inv_fw_health_reg(bp);
return 0;
+ }
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
resc_reinit = true;
if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
fw_reset = true;
+ else if (bp->fw_health && !bp->fw_health->status_reliable)
+ bnxt_try_map_fw_health_reg(bp);
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
@@ -9571,6 +9612,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
}
if (resc_reinit || fw_reset) {
if (fw_reset) {
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
bnxt_ulp_stop(bp);
bnxt_free_ctx_mem(bp);
@@ -9579,21 +9621,25 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
if (rc) {
+ clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
return rc;
}
bnxt_clear_int_mode(bp);
rc = bnxt_init_int_mode(bp);
if (rc) {
+ clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
netdev_err(bp->dev, "init int mode failed\n");
return rc;
}
- set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
}
if (BNXT_NEW_RM(bp)) {
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
hw_resc->resv_cp_rings = 0;
hw_resc->resv_stat_ctxs = 0;
hw_resc->resv_irqs = 0;
@@ -9607,7 +9653,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
}
}
}
- return 0;
+ return rc;
}
static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
@@ -11640,7 +11686,7 @@ static void bnxt_reset_all(struct bnxt *bp)
req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- if (rc)
+ if (rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
}
bp->fw_reset_timestamp = jiffies;
@@ -11723,28 +11769,20 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
return;
case BNXT_FW_RESET_STATE_ENABLE_DEV:
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
- u32 val;
-
- if (!bp->fw_reset_min_dsecs) {
- u16 val;
-
- pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID,
- &val);
- if (val == 0xffff) {
- if (bnxt_fw_reset_timeout(bp)) {
- netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
- goto fw_reset_abort;
- }
- bnxt_queue_fw_reset_work(bp, HZ / 1000);
- return;
+ bnxt_inv_fw_health_reg(bp);
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
+ !bp->fw_reset_min_dsecs) {
+ u16 val;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ if (val == 0xffff) {
+ if (bnxt_fw_reset_timeout(bp)) {
+ netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ goto fw_reset_abort;
}
+ bnxt_queue_fw_reset_work(bp, HZ / 1000);
+ return;
}
- val = bnxt_fw_health_readl(bp,
- BNXT_FW_RESET_INPROG_REG);
- if (val)
- netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
- val);
}
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
if (pci_enable_device(bp->pdev)) {
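
The HWRM timeout rework earlier in this file boils down to a polling loop that sleeps briefly for the first few passes, sleeps longer afterwards, and now bails out as soon as a "must abort" predicate becomes true (HWRM_WAIT_MUST_ABORT: the request is not HWRM_VER_GET and the firmware no longer looks healthy). A minimal userspace sketch of that loop shape follows; the constants and the two predicates are made up for illustration and merely stand in for the driver's firmware-status and response checks.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative values only; the driver derives these from the HWRM_*_TIMEOUT macros. */
#define SHORT_SLEEP_US      100
#define LONG_SLEEP_US     25000
#define SHORT_PHASE_ITERS    20
#define TOTAL_ITERS         200

/* Stand-in for HWRM_WAIT_MUST_ABORT(bp, req): stop waiting once FW looks unhealthy. */
static bool must_abort(void)
{
	return false;			/* a real check would read the firmware status word */
}

/* Stand-in for "response arrived" (the driver watches a DMA-updated length field). */
static bool response_ready(int i)
{
	return i == 37;			/* pretend the reply lands on iteration 37 */
}

int main(void)
{
	int i;

	for (i = 0; i < TOTAL_ITERS; i++) {
		if (response_ready(i))
			break;
		if (i < SHORT_PHASE_ITERS) {
			usleep(SHORT_SLEEP_US);		/* first passes: barely sleep */
		} else {
			if (must_abort())		/* new in the patch: give up early */
				break;
			usleep(LONG_SLEEP_US);
		}
	}

	if (i >= TOTAL_ITERS || must_abort())
		fprintf(stderr, "timed out or aborted after %d iterations\n", i);
	else
		printf("response after %d iterations\n", i);
	return 0;
}
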
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 1259e68cba2a..29061c577baa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -671,6 +671,10 @@ struct nqe_cn {
#define HWRM_MIN_TIMEOUT 25
#define HWRM_MAX_TIMEOUT 40
+#define HWRM_WAIT_MUST_ABORT(bp, req) \
+ (le16_to_cpu((req)->req_type) != HWRM_VER_GET && \
+ !bnxt_is_fw_healthy(bp))
+
#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \
((n) * HWRM_SHORT_MIN_TIMEOUT) : \
(HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
@@ -1560,6 +1564,7 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_STATUS_HEALTH_MSK 0xffff
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
+#define BNXT_FW_STATUS_RECOVERING 0x400000
#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
BNXT_FW_STATUS_HEALTHY)
@@ -1570,6 +1575,9 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
BNXT_FW_STATUS_HEALTHY)
+#define BNXT_FW_IS_RECOVERING(sts) (BNXT_FW_IS_ERR(sts) && \
+ ((sts) & BNXT_FW_STATUS_RECOVERING))
+
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
@@ -2228,6 +2236,7 @@ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
+bool bnxt_is_fw_healthy(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
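
The new BNXT_FW_STATUS_RECOVERING bit, BNXT_FW_IS_RECOVERING() and bnxt_is_fw_healthy() all key off the same firmware status word. The small standalone sketch below shows how those mask tests compose, reusing the bit values and macro logic from the header above (BNXT_FW_IS_ERR appears in its context lines); the three sample status values are invented purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Bit layout and predicates as defined in bnxt.h above. */
#define BNXT_FW_STATUS_HEALTH_MSK	0xffff
#define BNXT_FW_STATUS_HEALTHY		0x8000
#define BNXT_FW_STATUS_RECOVERING	0x400000

#define BNXT_FW_IS_HEALTHY(sts)		(((sts) & BNXT_FW_STATUS_HEALTH_MSK) == \
					 BNXT_FW_STATUS_HEALTHY)
#define BNXT_FW_IS_ERR(sts)		(((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
					 BNXT_FW_STATUS_HEALTHY)
#define BNXT_FW_IS_RECOVERING(sts)	(BNXT_FW_IS_ERR(sts) && \
					 ((sts) & BNXT_FW_STATUS_RECOVERING))

static void classify(uint32_t sts)
{
	printf("status 0x%06x: healthy=%d err=%d recovering=%d\n",
	       sts, (int)BNXT_FW_IS_HEALTHY(sts), (int)BNXT_FW_IS_ERR(sts),
	       !!BNXT_FW_IS_RECOVERING(sts));
}

int main(void)
{
	classify(0x008000);	/* health field equals HEALTHY */
	classify(0x008001);	/* health field above HEALTHY: error state */
	classify(0x408001);	/* error state with the RECOVERING bit also set */
	return 0;
}
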