Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 104 +++++++++--------
 1 file changed, 54 insertions(+), 50 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 592ff237d692..3558f019b631 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -59,7 +59,7 @@ static const char ixgbevf_driver_string[] =
#define DRV_VERSION "2.12.1-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2012 Intel Corporation.";
+ "Copyright (c) 2009 - 2015 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -96,12 +96,14 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+static struct workqueue_struct *ixgbevf_wq;
+
static void ixgbevf_service_event_schedule(struct ixgbevf_adapter *adapter)
{
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
!test_bit(__IXGBEVF_REMOVING, &adapter->state) &&
!test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state))
- schedule_work(&adapter->service_task);
+ queue_work(ixgbevf_wq, &adapter->service_task);
}
static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
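The hunk above is the heart of this change: the service task moves off the shared system workqueue (schedule_work()) onto a driver-private ixgbevf_wq, so ixgbevf's service work is not subject to delays from unrelated items on the shared queue. A minimal sketch of the same scheduling pattern, assuming a hypothetical my_adapter structure, my_wq, and __MY_DOWN/__MY_SERVICE_SCHED state bits; only the queue_work()-onto-a-private-workqueue call and the test_and_set_bit() guard come from the patch itself:

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define __MY_DOWN		0
#define __MY_SERVICE_SCHED	1

/* Driver-private workqueue, created at module init (see the final hunks). */
static struct workqueue_struct *my_wq;

struct my_adapter {
	unsigned long state;			/* __MY_* bits */
	struct work_struct service_task;	/* INIT_WORK()ed during probe */
};

static void my_service_event_schedule(struct my_adapter *adapter)
{
	/* Queue only if the device is up and the task is not already pending;
	 * test_and_set_bit() makes the "already scheduled" check atomic.
	 */
	if (!test_bit(__MY_DOWN, &adapter->state) &&
	    !test_and_set_bit(__MY_SERVICE_SCHED, &adapter->state))
		queue_work(my_wq, &adapter->service_task);
}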
@@ -1014,6 +1016,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
ixgbevf_for_each_ring(ring, q_vector->tx)
clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+ if (budget <= 0)
+ return budget;
#ifdef CONFIG_NET_RX_BUSY_POLL
if (!ixgbevf_qv_lock_napi(q_vector))
return budget;
@@ -1043,7 +1047,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
return budget;
/* all work done, exit the polling mode */
napi_complete_done(napi, work_done);
- if (adapter->rx_itr_setting & 1)
+ if (adapter->rx_itr_setting == 1)
ixgbevf_set_itr(q_vector);
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
!test_bit(__IXGBEVF_REMOVING, &adapter->state))
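The two ixgbevf_poll() hunks do two things: they bail out early when the caller passes budget <= 0 (netpoll calls poll that way: Tx is still cleaned, but Rx processing and napi_complete_done() must be skipped), and they tighten the adaptive-ITR test to rx_itr_setting == 1, i.e. only the explicit "dynamic" setting rather than any odd value. A stripped-down poll skeleton showing that control flow; struct my_q_vector and the my_clean_tx()/my_clean_rx()/my_irq_enable_queues() helpers are placeholders, not driver code:

#include <linux/netdevice.h>

struct my_q_vector {
	struct napi_struct napi;
	/* Tx/Rx ring pointers omitted for brevity */
};

/* Placeholder clean routines; a real driver reclaims descriptors here. */
static bool my_clean_tx(struct my_q_vector *qv) { return true; }
static int my_clean_rx(struct my_q_vector *qv, int budget) { return 0; }
static void my_irq_enable_queues(struct my_q_vector *qv) { }

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_q_vector *qv = container_of(napi, struct my_q_vector, napi);
	bool clean_complete = my_clean_tx(qv);
	int work_done;

	/* netpoll may call with a zero budget: Tx was cleaned above, but Rx
	 * must not be touched and NAPI must not be completed.
	 */
	if (budget <= 0)
		return budget;

	work_done = my_clean_rx(qv, budget);
	clean_complete &= (work_done < budget);

	if (!clean_complete)
		return budget;	/* more work pending: stay in polling mode */

	/* All work done: leave polling mode and re-arm this vector's IRQ. */
	napi_complete_done(napi, work_done);
	my_irq_enable_queues(qv);

	return work_done;
}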
@@ -1138,7 +1142,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
if (q_vector->tx.ring && !q_vector->rx.ring) {
/* Tx only vector */
if (adapter->tx_itr_setting == 1)
- q_vector->itr = IXGBE_10K_ITR;
+ q_vector->itr = IXGBE_12K_ITR;
else
q_vector->itr = adapter->tx_itr_setting;
} else {
@@ -1196,7 +1200,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
/* simple throttle rate management
* 0-20MB/s lowest (100000 ints/s)
* 20-100MB/s low (20000 ints/s)
- * 100-1249MB/s bulk (8000 ints/s)
+ * 100-1249MB/s bulk (12000 ints/s)
*/
/* what was last interrupt timeslice? */
timepassed_us = q_vector->itr >> 2;
@@ -1246,8 +1250,9 @@ static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
+ new_itr = IXGBE_12K_ITR;
+ break;
default:
- new_itr = IXGBE_8K_ITR;
break;
}
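The ITR hunks retire the 8K bucket: a Tx-only vector left at the default setting and the bulk_latency class both now target roughly 12000 interrupts per second, and an unrecognized latency class keeps the current ITR instead of dropping to 8K. A small sketch of the resulting selection, with hypothetical rate constants standing in for the driver's IXGBE_*_ITR register encodings and enum values mirroring the latency classes it uses:

/* Latency classes used by the adaptive ITR logic. */
enum my_latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
};

/* Hypothetical target interrupt rates, in interrupts per second; the real
 * driver stores these as hardware ITR interval encodings instead.
 */
#define MY_LOWEST_LATENCY_RATE	100000
#define MY_LOW_LATENCY_RATE	20000
#define MY_BULK_RATE		12000	/* was 8000 before this patch */

static unsigned int my_pick_int_rate(enum my_latency_range range,
				     unsigned int current_rate)
{
	switch (range) {
	case lowest_latency:
		return MY_LOWEST_LATENCY_RATE;
	case low_latency:
		return MY_LOW_LATENCY_RATE;
	case bulk_latency:
		return MY_BULK_RATE;
	default:
		return current_rate;	/* unknown class: keep what we have */
	}
}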
@@ -1288,7 +1293,7 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
/* EIAM disabled interrupts (on this vector) for us */
if (q_vector->rx.ring || q_vector->tx.ring)
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
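napi_schedule_irqoff() is the variant of napi_schedule() that skips the local_irq_save()/local_irq_restore() pair; it is only valid where interrupts are already disabled, which is the case in a non-threaded MSI-X handler like the one above. A minimal handler following the same pattern (struct my_q_vector and my_msix_clean_rings() are placeholders):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_q_vector {
	struct napi_struct napi;
	/* ring pointers omitted */
};

static irqreturn_t my_msix_clean_rings(int irq, void *data)
{
	struct my_q_vector *q_vector = data;

	/* Hard-IRQ context, interrupts already off: the _irqoff variant
	 * avoids a redundant save/restore of the interrupt flags.
	 */
	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}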
@@ -1332,7 +1337,6 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
int txr_remaining = adapter->num_tx_queues;
int i, j;
int rqpv, tqpv;
- int err = 0;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1345,7 +1349,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
for (; txr_idx < txr_remaining; v_start++, txr_idx++)
map_vector_to_txq(adapter, v_start, txr_idx);
- goto out;
+ return 0;
}
/* If we don't have enough vectors for a 1-to-1
@@ -1370,8 +1374,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
}
}
-out:
- return err;
+ return 0;
}
/**
@@ -1469,9 +1472,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
**/
static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
{
- int err = 0;
-
- err = ixgbevf_request_msix_irqs(adapter);
+ int err = ixgbevf_request_msix_irqs(adapter);
if (err)
hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
@@ -1830,7 +1831,7 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int err = -EOPNOTSUPP;
+ int err;
spin_lock_bh(&adapter->mbx_lock);
@@ -2046,7 +2047,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
ixgbe_mbox_api_11,
ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown };
- int err = 0, idx = 0;
+ int err, idx = 0;
spin_lock_bh(&adapter->mbx_lock);
@@ -2260,10 +2261,8 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
adapter->last_reset = jiffies;
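ether_addr_copy() replaces the open-coded memcpy(..., netdev->addr_len) pairs here and in the later hunks; it always copies exactly ETH_ALEN bytes and expects both buffers to be 2-byte aligned, which holds for the netdev and MAC structure fields being copied. A tiny illustration of the substitution, using a hypothetical helper:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Both arguments of ether_addr_copy() must be 2-byte aligned. */
static void my_sync_mac(struct net_device *netdev, const u8 *hw_addr)
{
	/* Same effect as memcpy(netdev->dev_addr, hw_addr, netdev->addr_len)
	 * on an Ethernet device, but fixed at ETH_ALEN and self-documenting.
	 */
	ether_addr_copy(netdev->dev_addr, hw_addr);
	ether_addr_copy(netdev->perm_addr, hw_addr);
}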
@@ -2421,7 +2420,7 @@ err_allocation:
static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int err = 0;
+ int err;
int vector, v_budget;
/* It's easy to be greedy for MSI-X vectors, but it really
@@ -2439,26 +2438,21 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
*/
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
- if (!adapter->msix_entries) {
- err = -ENOMEM;
- goto out;
- }
+ if (!adapter->msix_entries)
+ return -ENOMEM;
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
if (err)
- goto out;
+ return err;
err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
- goto out;
-
- err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+ return err;
-out:
- return err;
+ return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
}
/**
@@ -2483,9 +2477,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbevf_poll, 64);
-#ifdef CONFIG_NET_RX_BUSY_POLL
- napi_hash_add(&q_vector->napi);
-#endif
adapter->q_vector[q_idx] = q_vector;
}
@@ -2662,13 +2653,14 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, hw->mac.addr);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_info(&pdev->dev, "Assigning random MAC address\n");
eth_hw_addr_random(netdev);
- memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
+ ether_addr_copy(hw->mac.addr, netdev->dev_addr);
+ ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr);
}
/* Enable dynamic interrupt throttling rates */
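In ixgbevf_sw_init() the random-address fallback now also seeds hw->mac.perm_addr, so the "permanent" address the VF later reports matches what was actually programmed. A sketch of that fallback, assuming a hypothetical my_hw structure (on current kernels netdev->dev_addr is const and must be set through eth_hw_addr_set(), but that is not an issue for the kernel this patch targets):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct my_hw {
	u8 mac_addr[ETH_ALEN];
	u8 perm_addr[ETH_ALEN];
};

static void my_init_mac(struct net_device *netdev, struct my_hw *hw)
{
	if (is_valid_ether_addr(hw->mac_addr)) {
		/* The PF/firmware assigned a usable address. */
		ether_addr_copy(netdev->dev_addr, hw->mac_addr);
	} else {
		/* Nothing assigned: generate a random address and keep the
		 * hw copies in sync with what the netdev now reports.
		 */
		eth_hw_addr_random(netdev);
		ether_addr_copy(hw->mac_addr, netdev->dev_addr);
		ether_addr_copy(hw->perm_addr, netdev->dev_addr);
	}
}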
@@ -3355,6 +3347,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_hdr = 0;
+ __be16 frag_off;
switch (first->protocol) {
case htons(ETH_P_IP):
@@ -3365,13 +3358,16 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
case htons(ETH_P_IPV6):
vlan_macip_lens |= skb_network_header_len(skb);
l4_hdr = ipv6_hdr(skb)->nexthdr;
+ if (likely(skb_network_header_len(skb) ==
+ sizeof(struct ipv6hdr)))
+ break;
+ ipv6_skip_exthdr(skb, skb_network_offset(skb) +
+ sizeof(struct ipv6hdr),
+ &l4_hdr, &frag_off);
+ if (unlikely(frag_off))
+ l4_hdr = NEXTHDR_FRAGMENT;
break;
default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but proto=%x!\n",
- first->protocol);
- }
break;
}
@@ -3393,16 +3389,18 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum, l3 proto=%x, l4 proto=%x\n",
+ first->protocol, l4_hdr);
}
- break;
+ skb_checksum_help(skb);
+ goto no_csum;
}
/* update TX checksum flag */
first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
+no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
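The checksum-offload hunks do two things. For IPv6 packets whose network header is longer than a bare struct ipv6hdr, ipv6_skip_exthdr() walks the extension-header chain to find the real L4 protocol, and a non-zero fragment offset forces NEXTHDR_FRAGMENT so fragments are treated as not offloadable. For any L4 protocol the hardware cannot handle, skb_checksum_help() now computes the checksum in software (and the context-descriptor setup is skipped via the new no_csum label) instead of only logging a rate-limited warning. A condensed sketch of that parse-and-fall-back logic; my_l4_offloadable() is a hypothetical predicate, not a driver function:

#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <net/ipv6.h>

/* Hypothetical: true if the NIC can insert a checksum for this L4 protocol. */
static bool my_l4_offloadable(u8 l4_hdr)
{
	return l4_hdr == IPPROTO_TCP || l4_hdr == IPPROTO_UDP;
}

static int my_tx_csum_ipv6(struct sk_buff *skb)
{
	u8 l4_hdr = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	if (skb_network_header_len(skb) != sizeof(struct ipv6hdr)) {
		/* Extension headers present: walk them to the real L4 header. */
		ipv6_skip_exthdr(skb, skb_network_offset(skb) +
				      sizeof(struct ipv6hdr),
				 &l4_hdr, &frag_off);
		if (frag_off)
			l4_hdr = NEXTHDR_FRAGMENT;	/* never offloadable */
	}

	if (!my_l4_offloadable(l4_hdr)) {
		/* Hardware cannot checksum this protocol: fall back to a
		 * software checksum rather than sending the packet unchecked.
		 */
		return skb_checksum_help(skb);
	}

	/* The caller would now build the hardware checksum context descriptor. */
	return 0;
}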
@@ -3698,8 +3696,8 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ ether_addr_copy(hw->mac.addr, addr->sa_data);
spin_lock_bh(&adapter->mbx_lock);
@@ -4248,15 +4246,17 @@ static struct pci_driver ixgbevf_driver = {
**/
static int __init ixgbevf_init_module(void)
{
- int ret;
-
pr_info("%s - version %s\n", ixgbevf_driver_string,
ixgbevf_driver_version);
pr_info("%s\n", ixgbevf_copyright);
+ ixgbevf_wq = create_singlethread_workqueue(ixgbevf_driver_name);
+ if (!ixgbevf_wq) {
+ pr_err("%s: Failed to create workqueue\n", ixgbevf_driver_name);
+ return -ENOMEM;
+ }
- ret = pci_register_driver(&ixgbevf_driver);
- return ret;
+ return pci_register_driver(&ixgbevf_driver);
}
module_init(ixgbevf_init_module);
@@ -4270,6 +4270,10 @@ module_init(ixgbevf_init_module);
static void __exit ixgbevf_exit_module(void)
{
pci_unregister_driver(&ixgbevf_driver);
+ if (ixgbevf_wq) {
+ destroy_workqueue(ixgbevf_wq);
+ ixgbevf_wq = NULL;
+ }
}
#ifdef DEBUG
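The final two hunks own the lifetime of the new workqueue: it is created before pci_register_driver() so the service task can always be queued once a device probes, and destroyed after pci_unregister_driver() when no adapter can queue new work (the NULL check in the exit path is harmless belt-and-braces, since exit only runs after a successful init). A skeleton of that ordering with a hypothetical driver; unlike the hunk above, this sketch also destroys the workqueue if PCI registration itself fails:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

/* Hypothetical PCI driver; a real one has an id_table, probe, remove, ... */
static struct pci_driver my_pci_driver = {
	.name = "my_driver",
};

static int __init my_init_module(void)
{
	int err;

	/* Single-threaded: service tasks for all adapters are serialized. */
	my_wq = create_singlethread_workqueue("my_driver");
	if (!my_wq)
		return -ENOMEM;

	err = pci_register_driver(&my_pci_driver);
	if (err) {
		destroy_workqueue(my_wq);
		return err;
	}

	return 0;
}
module_init(my_init_module);

static void __exit my_exit_module(void)
{
	pci_unregister_driver(&my_pci_driver);
	/* Safe only after unregister: nothing can queue new work now. */
	destroy_workqueue(my_wq);
}
module_exit(my_exit_module);

MODULE_LICENSE("GPL");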