From 012ec81223aa45d2b80aeafb77392fd1a19c7b10 Mon Sep 17 00:00:00 2001
From: Himanshu Madhani
Date: Wed, 21 Aug 2013 11:24:10 -0400
Subject: qlcnic: Multi Tx queue support for 82xx Series adapter.

o 82xx firmware allows support for multiple Tx queues. This patch will
  enable multi Tx queue support for 82xx series adapter. Max number of
  Tx queues supported will be 8.

Signed-off-by: Himanshu Madhani
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 140 +++++++++++++++++++++----
 1 file changed, 117 insertions(+), 23 deletions(-)

(limited to 'drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c')

diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index cec0908bb1d6..f4b09f44173c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -127,6 +127,21 @@ struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
 				     struct qlcnic_host_rds_ring *, u16, u16);
 
+inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+				  struct qlcnic_host_tx_ring *tx_ring)
+{
+	if (qlcnic_check_multi_tx(adapter))
+		writel(0x0, tx_ring->crb_intr_mask);
+}
+
+
+static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
+					 struct qlcnic_host_tx_ring *tx_ring)
+{
+	if (qlcnic_check_multi_tx(adapter))
+		writel(1, tx_ring->crb_intr_mask);
+}
+
 inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
 				       struct qlcnic_host_tx_ring *tx_ring)
 {
@@ -354,14 +369,14 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
 }
 
 static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
-			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+			 struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+			 struct qlcnic_host_tx_ring *tx_ring)
 {
 	u8 l4proto, opcode = 0, hdr_len = 0;
 	u16 flags = 0, vlan_tci = 0;
 	int copied, offset, copy_len, size;
 	struct cmd_desc_type0 *hwdesc;
 	struct vlan_ethhdr *vh;
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
 	u16 protocol = ntohs(skb->protocol);
 	u32 producer = tx_ring->producer;
 
@@ -544,7 +559,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc)
 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_cmd_buffer *pbuf;
 	struct qlcnic_skb_frag *buffrag;
 	struct cmd_desc_type0 *hwdesc, *first_desc;
@@ -553,10 +568,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	int i, k, frag_count, delta = 0;
 	u32 producer, num_txd;
 
-	num_txd = tx_ring->num_desc;
-
 	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
-		netif_stop_queue(netdev);
+		netif_tx_stop_all_queues(netdev);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -566,7 +579,14 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		goto drop_packet;
 	}
 
+	if (qlcnic_check_multi_tx(adapter))
+		tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+	else
+		tx_ring = &adapter->tx_ring[0];
+	num_txd = tx_ring->num_desc;
+
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
+
 	/* 14 frags supported for normal packet and
 	 * 32 frags supported for TSO packet
 	 */
@@ -581,11 +601,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	}
 
 	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(tx_ring->txq);
 		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
-			netif_start_queue(netdev);
+			netif_tx_start_queue(tx_ring->txq);
 		} else {
 			adapter->stats.xmit_off++;
+			tx_ring->xmit_off++;
 			return NETDEV_TX_BUSY;
 		}
 	}
@@ -640,7 +661,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	tx_ring->producer = get_next_index(producer, num_txd);
 	smp_mb();
 
-	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+	if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
 		goto unwind_buff;
 
 	if (adapter->drv_mac_learn)
@@ -648,6 +669,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	adapter->stats.txbytes += skb->len;
 	adapter->stats.xmitcalled++;
+	tx_ring->xmit_called++;
 
 	qlcnic_update_cmd_producer(tx_ring);
 
@@ -670,7 +692,7 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
 		adapter->ahw->linkup = 0;
 		if (netif_running(netdev)) {
 			netif_carrier_off(netdev);
-			netif_stop_queue(netdev);
+			netif_tx_stop_all_queues(netdev);
 		}
 	} else if (!adapter->ahw->linkup && linkup) {
 		netdev_info(netdev, "NIC Link is up\n");
@@ -765,9 +787,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 	struct net_device *netdev = adapter->netdev;
 	struct qlcnic_skb_frag *frag;
 
-	if (!spin_trylock(&adapter->tx_clean_lock))
-		return 1;
-
 	sw_consumer = tx_ring->sw_consumer;
 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 
@@ -785,6 +804,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 				frag->dma = 0ULL;
 			}
 			adapter->stats.xmitfinished++;
+			tx_ring->xmit_finished++;
 			dev_kfree_skb_any(buffer->skb);
 			buffer->skb = NULL;
 		}
@@ -797,10 +817,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 	if (count && netif_running(netdev)) {
 		tx_ring->sw_consumer = sw_consumer;
 		smp_mb();
-		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+		if (netif_tx_queue_stopped(tx_ring->txq) &&
+		    netif_carrier_ok(netdev)) {
 			if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
-				netif_wake_queue(netdev);
+				netif_tx_wake_queue(tx_ring->txq);
 				adapter->stats.xmit_on++;
+				tx_ring->xmit_on++;
 			}
 		}
 		adapter->tx_timeo_cnt = 0;
@@ -820,7 +842,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
 	 */
 	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 	done = (sw_consumer == hw_consumer);
-	spin_unlock(&adapter->tx_clean_lock);
 
 	return done;
 }
@@ -830,16 +851,40 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
 	int tx_complete, work_done;
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_adapter *adapter;
+	struct qlcnic_host_tx_ring *tx_ring;
 
 	sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
 	adapter = sds_ring->adapter;
-	tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+	tx_ring = sds_ring->tx_ring;
+
+	tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
 					      budget);
 	work_done = qlcnic_process_rcv_ring(sds_ring, budget);
 	if ((work_done < budget) && tx_complete) {
 		napi_complete(&sds_ring->napi);
-		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
 			qlcnic_enable_int(sds_ring);
+			qlcnic_enable_tx_intr(adapter, tx_ring);
+		}
+	}
+
+	return work_done;
+}
+
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct qlcnic_host_tx_ring *tx_ring;
+	struct qlcnic_adapter *adapter;
+	int work_done;
+
+	tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+	adapter = tx_ring->adapter;
+
+	work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+	if (work_done) {
+		napi_complete(&tx_ring->napi);
+		if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+			qlcnic_enable_tx_intr(adapter, tx_ring);
 	}
 
 	return work_done;
 }
@@ -1411,6 +1456,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 	int ring, max_sds_rings;
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+	struct qlcnic_host_tx_ring *tx_ring;
 
 	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
 		return -ENOMEM;
@@ -1419,12 +1465,21 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
-		if (ring == adapter->max_sds_rings - 1)
-			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
-				       QLCNIC_NETDEV_WEIGHT / max_sds_rings);
-		else
+		if (qlcnic_check_multi_tx(adapter) &&
+		    (adapter->max_drv_tx_rings > 1)) {
 			netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
-				       QLCNIC_NETDEV_WEIGHT*2);
+				       QLCNIC_NETDEV_WEIGHT * 2);
+		} else {
+			if (ring == (adapter->max_sds_rings - 1))
+				netif_napi_add(netdev, &sds_ring->napi,
+					       qlcnic_poll,
+					       QLCNIC_NETDEV_WEIGHT /
+					       max_sds_rings);
+			else
+				netif_napi_add(netdev, &sds_ring->napi,
+					       qlcnic_rx_poll,
+					       QLCNIC_NETDEV_WEIGHT * 2);
+		}
 	}
 
 	if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1432,6 +1487,14 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 		return -ENOMEM;
 	}
 
+	if (qlcnic_check_multi_tx(adapter)) {
+		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+			tx_ring = &adapter->tx_ring[ring];
+			netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+				       QLCNIC_NETDEV_WEIGHT);
+		}
+	}
+
 	return 0;
 }
 
@@ -1440,6 +1503,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
 	int ring;
 	struct qlcnic_host_sds_ring *sds_ring;
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+	struct qlcnic_host_tx_ring *tx_ring;
 
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
@@ -1447,6 +1511,14 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
 	}
 
 	qlcnic_free_sds_rings(adapter->recv_ctx);
+
+	if (qlcnic_check_multi_tx(adapter)) {
+		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+			tx_ring = &adapter->tx_ring[ring];
+			netif_napi_del(&tx_ring->napi);
+		}
+	}
+
 	qlcnic_free_tx_rings(adapter);
 }
 
@@ -1454,6 +1526,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
 {
 	int ring;
 	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1464,12 +1537,23 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
 		napi_enable(&sds_ring->napi);
 		qlcnic_enable_int(sds_ring);
 	}
+
+	if (qlcnic_check_multi_tx(adapter) &&
+	    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+	    (adapter->max_drv_tx_rings > 1)) {
+		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+			tx_ring = &adapter->tx_ring[ring];
+			napi_enable(&tx_ring->napi);
+			qlcnic_enable_tx_intr(adapter, tx_ring);
+		}
+	}
 }
 
 void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
 {
 	int ring;
 	struct qlcnic_host_sds_ring *sds_ring;
+	struct qlcnic_host_tx_ring *tx_ring;
 	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
 
 	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1481,6 +1565,16 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
 		napi_synchronize(&sds_ring->napi);
 		napi_disable(&sds_ring->napi);
 	}
+
+	if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+	    qlcnic_check_multi_tx(adapter)) {
+		for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+			tx_ring = &adapter->tx_ring[ring];
+			qlcnic_disable_tx_int(adapter, tx_ring);
+			napi_synchronize(&tx_ring->napi);
+			napi_disable(&tx_ring->napi);
+		}
+	}
 }
 
 #define QLC_83XX_NORMAL_LB_PKT	(1ULL << 36)
--
cgit v1.2.3