Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c')
-rw-r--r--   drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 298
1 file changed, 175 insertions(+), 123 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b0edae94d73d..5e348b125090 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -62,10 +62,14 @@ static char ixgbevf_copyright[] =
         "Copyright (c) 2009 - 2015 Intel Corporation.";
 
 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
-        [board_82599_vf] = &ixgbevf_82599_vf_info,
-        [board_X540_vf] = &ixgbevf_X540_vf_info,
-        [board_X550_vf] = &ixgbevf_X550_vf_info,
-        [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+        [board_82599_vf]       = &ixgbevf_82599_vf_info,
+        [board_82599_vf_hv]    = &ixgbevf_82599_vf_hv_info,
+        [board_X540_vf]        = &ixgbevf_X540_vf_info,
+        [board_X540_vf_hv]     = &ixgbevf_X540_vf_hv_info,
+        [board_X550_vf]        = &ixgbevf_X550_vf_info,
+        [board_X550_vf_hv]     = &ixgbevf_X550_vf_hv_info,
+        [board_X550EM_x_vf]    = &ixgbevf_X550EM_x_vf_info,
+        [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
 };
 
 /* ixgbevf_pci_tbl - PCI Device ID Table
@@ -78,9 +82,13 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
  */
 static const struct pci_device_id ixgbevf_pci_tbl[] = {
         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
         {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
         /* required last entry */
         {0, }
 };
@@ -268,7 +276,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
 {
         /* Do the reset outside of interrupt context */
         if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
-                adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+                set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
                 ixgbevf_service_event_schedule(adapter);
         }
 }
@@ -288,9 +296,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev)
  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
  * @q_vector: board private structure
  * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
  **/
 static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
-                                 struct ixgbevf_ring *tx_ring)
+                                 struct ixgbevf_ring *tx_ring, int napi_budget)
 {
         struct ixgbevf_adapter *adapter = q_vector->adapter;
         struct ixgbevf_tx_buffer *tx_buffer;
@@ -328,7 +337,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                 total_packets += tx_buffer->gso_segs;
 
                 /* free the skb */
-                dev_kfree_skb_any(tx_buffer->skb);
+                napi_consume_skb(tx_buffer->skb, napi_budget);
 
                 /* unmap skb header data */
                 dma_unmap_single(tx_ring->dev,
@@ -1013,8 +1022,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
         int per_ring_budget, work_done = 0;
         bool clean_complete = true;
 
-        ixgbevf_for_each_ring(ring, q_vector->tx)
-                clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+        ixgbevf_for_each_ring(ring, q_vector->tx) {
+                if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
+                        clean_complete = false;
+        }
 
         if (budget <= 0)
                 return budget;
@@ -1035,7 +1046,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
                 int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
                                                    per_ring_budget);
                 work_done += cleaned;
-                clean_complete &= (cleaned < per_ring_budget);
+                if (cleaned >= per_ring_budget)
+                        clean_complete = false;
         }
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1052,7 +1064,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
         if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
             !test_bit(__IXGBEVF_REMOVING, &adapter->state))
                 ixgbevf_irq_enable_queues(adapter,
-                                          1 << q_vector->v_idx);
+                                          BIT(q_vector->v_idx));
 
         return 0;
 }
@@ -1154,14 +1166,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
                 }
 
                 /* add q_vector eims value to global eims_enable_mask */
-                adapter->eims_enable_mask |= 1 << v_idx;
+                adapter->eims_enable_mask |= BIT(v_idx);
 
                 ixgbevf_write_eitr(q_vector);
         }
 
         ixgbevf_set_ivar(adapter, -1, 1, v_idx);
         /* setup eims_other and add value to global eims_enable_mask */
-        adapter->eims_other = 1 << v_idx;
+        adapter->eims_other = BIT(v_idx);
         adapter->eims_enable_mask |= adapter->eims_other;
 }
 
@@ -1585,8 +1597,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
         txdctl |= (8 << 16);    /* WTHRESH = 8 */
 
         /* Setting PTHRESH to 32 both improves performance */
-        txdctl |= (1 << 8) |    /* HTHRESH = 1 */
-                   32;          /* PTHRESH = 32 */
+        txdctl |= (1u << 8) |   /* HTHRESH = 1 */
+                   32;          /* PTHRESH = 32 */
 
         clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
 
@@ -1642,7 +1654,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
                       IXGBE_PSRTYPE_L2HDR;
 
         if (adapter->num_rx_queues > 1)
-                psrtype |= 1 << 29;
+                psrtype |= BIT(29);
 
         IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
 }
@@ -1748,9 +1760,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
         IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
                         ring->count * sizeof(union ixgbe_adv_rx_desc));
 
+#ifndef CONFIG_SPARC
         /* enable relaxed ordering */
         IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
                         IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+#else
+        IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+                        IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+                        IXGBE_DCA_RXCTRL_DATA_WRO_EN);
+#endif
 
         /* reset head and tail pointers */
         IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
@@ -1791,7 +1809,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
                 ixgbevf_setup_vfmrqc(adapter);
 
         /* notify the PF of our intent to use this size of frame */
-        ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+        hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
 
         /* Setup the HW Rx Head and Tail Descriptor Pointers and
          * the Base and Length of the Rx Descriptor Ring
@@ -1904,7 +1922,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 
         spin_lock_bh(&adapter->mbx_lock);
 
-        hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+        hw->mac.ops.update_xcast_mode(hw, xcast_mode);
 
         /* reprogram multicast list */
         hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -1984,7 +2002,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
                 hw->mbx.timeout = 0;
 
                 /* wait for watchdog to come around and bail us out */
-                adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+                set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
         }
 
         return 0;
@@ -2052,7 +2070,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
         spin_lock_bh(&adapter->mbx_lock);
 
         while (api[idx] != ixgbe_mbox_api_unknown) {
-                err = ixgbevf_negotiate_api_version(hw, api[idx]);
+                err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
                 if (!err)
                         break;
                 idx++;
@@ -2749,11 +2767,9 @@ static void ixgbevf_service_timer(unsigned long data)
 
 static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
 {
-        if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+        if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
                 return;
 
-        adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
-
         /* If we're already down or resetting, just bail */
         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
             test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -2795,7 +2811,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
                 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
 
                 if (qv->rx.ring || qv->tx.ring)
-                        eics |= 1 << i;
+                        eics |= BIT(i);
         }
 
         /* Cause software interrupt to ensure rings are cleaned */
@@ -2821,7 +2837,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
 
         /* if check for link returns error we will need to reset */
         if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
-                adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+                set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
                 link_up = false;
         }
 
@@ -3222,11 +3238,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
 {
         struct net_device *dev = adapter->netdev;
 
-        if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+        if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
+                                &adapter->state))
                 return;
 
-        adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
-
         /* if interface is down do nothing */
         if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
             test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -3271,9 +3286,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
                        struct ixgbevf_tx_buffer *first,
                        u8 *hdr_len)
 {
+        u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
         struct sk_buff *skb = first->skb;
-        u32 vlan_macip_lens, type_tucmd;
-        u32 mss_l4len_idx, l4len;
+        union {
+                struct iphdr *v4;
+                struct ipv6hdr *v6;
+                unsigned char *hdr;
+        } ip;
+        union {
+                struct tcphdr *tcp;
+                unsigned char *hdr;
+        } l4;
+        u32 paylen, l4_offset;
         int err;
 
         if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3286,49 +3310,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
         if (err < 0)
                 return err;
 
+        ip.hdr = skb_network_header(skb);
+        l4.hdr = skb_checksum_start(skb);
+
         /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
         type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
-        if (first->protocol == htons(ETH_P_IP)) {
-                struct iphdr *iph = ip_hdr(skb);
-
-                iph->tot_len = 0;
-                iph->check = 0;
-                tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
-                                                         iph->daddr, 0,
-                                                         IPPROTO_TCP,
-                                                         0);
+        /* initialize outer IP header fields */
+        if (ip.v4->version == 4) {
+                /* IP header will have to cancel out any data that
+                 * is not a part of the outer IP header
+                 */
+                ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+                                                  csum_unfold(l4.tcp->check)));
                 type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+                ip.v4->tot_len = 0;
                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                    IXGBE_TX_FLAGS_CSUM |
                                    IXGBE_TX_FLAGS_IPV4;
-        } else if (skb_is_gso_v6(skb)) {
-                ipv6_hdr(skb)->payload_len = 0;
-                tcp_hdr(skb)->check =
-                    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
-                                     &ipv6_hdr(skb)->daddr,
-                                     0, IPPROTO_TCP, 0);
+        } else {
+                ip.v6->payload_len = 0;
                 first->tx_flags |= IXGBE_TX_FLAGS_TSO |
                                    IXGBE_TX_FLAGS_CSUM;
         }
 
-        /* compute header lengths */
-        l4len = tcp_hdrlen(skb);
-        *hdr_len += l4len;
-        *hdr_len = skb_transport_offset(skb) + l4len;
+        /* determine offset of inner transport header */
+        l4_offset = l4.hdr - skb->data;
+
+        /* compute length of segmentation header */
+        *hdr_len = (l4.tcp->doff * 4) + l4_offset;
 
-        /* update GSO size and bytecount with header size */
+        /* remove payload length from inner checksum */
+        paylen = skb->len - l4_offset;
+        csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+        /* update gso size and bytecount with header size */
         first->gso_segs = skb_shinfo(skb)->gso_segs;
         first->bytecount += (first->gso_segs - 1) * *hdr_len;
 
         /* mss_l4len_id: use 1 as index for TSO */
-        mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+        mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
         mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
-        mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+        mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
 
         /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
-        vlan_macip_lens = skb_network_header_len(skb);
-        vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+        vlan_macip_lens = l4.hdr - ip.hdr;
+        vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
         ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
@@ -3337,76 +3365,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
         return 1;
 }
 
+static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+        unsigned int offset = 0;
+
+        ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+        return offset == skb_checksum_start_offset(skb);
+}
+
 static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
                             struct ixgbevf_tx_buffer *first)
 {
         struct sk_buff *skb = first->skb;
         u32 vlan_macip_lens = 0;
-        u32 mss_l4len_idx = 0;
         u32 type_tucmd = 0;
 
-        if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                u8 l4_hdr = 0;
-                __be16 frag_off;
-
-                switch (first->protocol) {
-                case htons(ETH_P_IP):
-                        vlan_macip_lens |= skb_network_header_len(skb);
-                        type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
-                        l4_hdr = ip_hdr(skb)->protocol;
-                        break;
-                case htons(ETH_P_IPV6):
-                        vlan_macip_lens |= skb_network_header_len(skb);
-                        l4_hdr = ipv6_hdr(skb)->nexthdr;
-                        if (likely(skb_network_header_len(skb) ==
-                                   sizeof(struct ipv6hdr)))
-                                break;
-                        ipv6_skip_exthdr(skb, skb_network_offset(skb) +
-                                              sizeof(struct ipv6hdr),
-                                         &l4_hdr, &frag_off);
-                        if (unlikely(frag_off))
-                                l4_hdr = NEXTHDR_FRAGMENT;
-                        break;
-                default:
-                        break;
-                }
+        if (skb->ip_summed != CHECKSUM_PARTIAL)
+                goto no_csum;
 
-                switch (l4_hdr) {
-                case IPPROTO_TCP:
-                        type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
-                        mss_l4len_idx = tcp_hdrlen(skb) <<
-                                        IXGBE_ADVTXD_L4LEN_SHIFT;
-                        break;
-                case IPPROTO_SCTP:
-                        type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
-                        mss_l4len_idx = sizeof(struct sctphdr) <<
-                                        IXGBE_ADVTXD_L4LEN_SHIFT;
-                        break;
-                case IPPROTO_UDP:
-                        mss_l4len_idx = sizeof(struct udphdr) <<
-                                        IXGBE_ADVTXD_L4LEN_SHIFT;
+        switch (skb->csum_offset) {
+        case offsetof(struct tcphdr, check):
+                type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                /* fall through */
+        case offsetof(struct udphdr, check):
+                break;
+        case offsetof(struct sctphdr, checksum):
+                /* validate that this is actually an SCTP request */
+                if (((first->protocol == htons(ETH_P_IP)) &&
+                     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+                    ((first->protocol == htons(ETH_P_IPV6)) &&
+                     ixgbevf_ipv6_csum_is_sctp(skb))) {
+                        type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
                         break;
-                default:
-                        if (unlikely(net_ratelimit())) {
-                                dev_warn(tx_ring->dev,
                                         "partial checksum, l3 proto=%x, l4 proto=%x\n",
-                                         first->protocol, l4_hdr);
-                        }
-                        skb_checksum_help(skb);
-                        goto no_csum;
                 }
-
-                /* update TX checksum flag */
-                first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+                /* fall through */
+        default:
+                skb_checksum_help(skb);
+                goto no_csum;
         }
-
+        /* update TX checksum flag */
+        first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+        vlan_macip_lens = skb_checksum_start_offset(skb) -
+                          skb_network_offset(skb);
 no_csum:
         /* vlan_macip_lens: MACLEN, VLAN tag */
         vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
         vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-        ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
-                            type_tucmd, mss_l4len_idx);
+        ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
 }
 
 static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3442,7 +3449,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
 
         /* use index 1 context for TSO/FSO/FCOE */
         if (tx_flags & IXGBE_TX_FLAGS_TSO)
-                olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+                olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
 
         /* Check Context must be set if Tx switch is enabled, which it
          * always is for case where virtual functions are running
@@ -3747,7 +3754,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
         netdev->mtu = new_mtu;
 
         /* notify the PF of our intent to use this size of frame */
-        ixgbevf_rlpml_set_vf(hw, max_frame);
+        hw->mac.ops.set_rlpml(hw, max_frame);
 
         return 0;
 }
@@ -3890,6 +3897,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
         return stats;
 }
 
+#define IXGBEVF_MAX_MAC_HDR_LEN         127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN     511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+                       netdev_features_t features)
+{
+        unsigned int network_hdr_len, mac_hdr_len;
+
+        /* Make certain the headers can be described by a context descriptor */
+        mac_hdr_len = skb_network_header(skb) - skb->data;
+        if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+                return features & ~(NETIF_F_HW_CSUM |
+                                    NETIF_F_SCTP_CRC |
+                                    NETIF_F_HW_VLAN_CTAG_TX |
+                                    NETIF_F_TSO |
+                                    NETIF_F_TSO6);
+
+        network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+        if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
+                return features & ~(NETIF_F_HW_CSUM |
+                                    NETIF_F_SCTP_CRC |
+                                    NETIF_F_TSO |
+                                    NETIF_F_TSO6);
+
+        /* We can only support IPV4 TSO in tunnels if we can mangle the
+         * inner IP ID field, so strip TSO if MANGLEID is not supported.
+         */
+        if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+                features &= ~NETIF_F_TSO;
+
+        return features;
+}
+
 static const struct net_device_ops ixgbevf_netdev_ops = {
         .ndo_open               = ixgbevf_open,
         .ndo_stop               = ixgbevf_close,
@@ -3908,7 +3949,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller    = ixgbevf_netpoll,
 #endif
-        .ndo_features_check     = passthru_features_check,
+        .ndo_features_check     = ixgbevf_features_check,
 };
 
 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -4013,26 +4054,37 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         }
 
         netdev->hw_features = NETIF_F_SG |
-                              NETIF_F_IP_CSUM |
-                              NETIF_F_IPV6_CSUM |
                               NETIF_F_TSO |
                               NETIF_F_TSO6 |
-                              NETIF_F_RXCSUM;
+                              NETIF_F_RXCSUM |
+                              NETIF_F_HW_CSUM |
+                              NETIF_F_SCTP_CRC;
 
-        netdev->features = netdev->hw_features |
-                           NETIF_F_HW_VLAN_CTAG_TX |
-                           NETIF_F_HW_VLAN_CTAG_RX |
-                           NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+                                      NETIF_F_GSO_GRE_CSUM | \
+                                      NETIF_F_GSO_IPIP | \
+                                      NETIF_F_GSO_SIT | \
+                                      NETIF_F_GSO_UDP_TUNNEL | \
+                                      NETIF_F_GSO_UDP_TUNNEL_CSUM)
 
-        netdev->vlan_features |= NETIF_F_TSO |
-                                 NETIF_F_TSO6 |
-                                 NETIF_F_IP_CSUM |
-                                 NETIF_F_IPV6_CSUM |
-                                 NETIF_F_SG;
+        netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+        netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+                               IXGBEVF_GSO_PARTIAL_FEATURES;
+
+        netdev->features = netdev->hw_features;
 
         if (pci_using_dac)
                 netdev->features |= NETIF_F_HIGHDMA;
 
+        netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+        netdev->mpls_features |= NETIF_F_HW_CSUM;
+        netdev->hw_enc_features |= netdev->vlan_features;
+
+        /* set this bit last since it cannot be part of vlan_features */
+        netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+                            NETIF_F_HW_VLAN_CTAG_RX |
+                            NETIF_F_HW_VLAN_CTAG_TX;
+
         netdev->priv_flags |= IFF_UNICAST_FLT;
 
         if (IXGBE_REMOVED(hw->hw_addr)) {
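One note for reviewers on the TSO hunk above: the old code recomputed the full TCP pseudo-header checksum with csum_tcpudp_magic()/csum_ipv6_magic(), while the new code keeps the checksum the stack already provided and only subtracts the payload length from it via csum_replace_by_diff(&l4.tcp->check, htonl(paylen)), leaving the address/protocol portion for the hardware to extend per segment. The sketch below is a userspace model of just that one's complement step. It is not kernel code: the csum_add32()/fold16() helpers merely mirror the kernel's csum_add()/csum_fold(), and it tracks the un-negated sum (the kernel stores the complement in tcp->check) so the arithmetic stays visible.

/* Sketch only: model of the "remove payload length from inner
 * checksum" step in the new ixgbevf_tso().  Helper names mirror
 * the kernel's csum_add()/csum_fold() but are reimplemented here.
 */
#include <stdint.h>
#include <stdio.h>

/* one's complement add with end-around carry (cf. kernel csum_add()) */
static uint32_t csum_add32(uint32_t csum, uint32_t addend)
{
        csum += addend;
        return csum + (csum < addend);
}

/* fold a 32-bit accumulator down to 16 bits (cf. kernel csum_fold()) */
static uint16_t fold16(uint32_t csum)
{
        csum = (csum & 0xffff) + (csum >> 16);
        csum = (csum & 0xffff) + (csum >> 16);
        return (uint16_t)csum;
}

int main(void)
{
        /* pseudo-header pieces, already split into 16-bit words */
        const uint16_t words[] = { 0xc000, 0x0201,      /* saddr 192.0.2.1 */
                                   0xc000, 0x0202,      /* daddr 192.0.2.2 */
                                   0x0006 };            /* IPPROTO_TCP */
        uint16_t paylen = 1448;         /* TCP header + payload bytes */
        uint32_t sum = 0;
        int i;

        for (i = 0; i < 5; i++)
                sum = csum_add32(sum, words[i]);

        /* checksum seeded with the length, as the stack leaves it */
        uint32_t with_len = csum_add32(sum, paylen);

        /* subtract paylen by adding its one's complement; this is the
         * effect of csum_replace_by_diff(&check, htonl(paylen))
         */
        uint32_t stripped = csum_add32(with_len, (uint16_t)~paylen);

        /* both values match: the length term is gone */
        printf("no-len sum %#06x, stripped sum %#06x\n",
               fold16(sum), fold16(stripped));
        return 0;
}

Because x + ~x is congruent to zero in one's complement arithmetic, adding the complement of paylen cancels the length term exactly, so the folded result equals a pseudo-header sum that never included the length, which is the state ixgbevf_tso() hands to the device before segmentation.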