Diffstat (limited to 'drivers/net/ethernet/cisco/enic/enic_main.c')
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  |  86
1 file changed, 68 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f202ba72a811..81684acf52af 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -191,8 +191,16 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
goto error;
}
- if (ti->sa_family != AF_INET) {
- netdev_info(netdev, "vxlan: only IPv4 offload supported");
+ switch (ti->sa_family) {
+ case AF_INET6:
+ if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) {
+ netdev_info(netdev, "vxlan: only IPv4 offload supported");
+ goto error;
+ }
+ /* Fall through */
+ case AF_INET:
+ break;
+ default:
goto error;
}
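
The rewritten family check accepts AF_INET unconditionally and AF_INET6 only when the adapter advertises outer-IPv6 support, with the IPv6 case falling through to the shared accept path. A minimal userspace sketch of that gate, assuming a capability bit that stands in for ENIC_VXLAN_OUTER_IPV6:

    #include <stdbool.h>
    #include <sys/socket.h>

    #define VXLAN_OUTER_IPV6 0x1    /* assumed stand-in for ENIC_VXLAN_OUTER_IPV6 */

    static bool tunnel_family_supported(int sa_family, unsigned char caps)
    {
        switch (sa_family) {
        case AF_INET6:
            if (!(caps & VXLAN_OUTER_IPV6))
                return false;       /* adapter offloads IPv4 outer headers only */
            /* fall through */
        case AF_INET:
            return true;
        default:
            return false;           /* not an IP address family */
        }
    }
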
@@ -204,6 +212,11 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
goto error;
}
+ if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) &&
+ !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) {
+ netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter");
+ goto error;
+ }
err = vnic_dev_overlay_offload_cfg(enic->vdev,
OVERLAY_CFG_VXLAN_PORT_UPDATE,
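
The guard added above refuses VXLAN offload when the vNIC exposes more than one work queue but the adapter lacks the multi-WQ capability. The condition reduces to a single expression; a sketch with an assumed flag value standing in for ENIC_VXLAN_MULTI_WQ:

    #include <stdbool.h>

    #define VXLAN_MULTI_WQ 0x2      /* assumed stand-in for ENIC_VXLAN_MULTI_WQ */

    /* Offload works with exactly one WQ, or with several WQs only when
     * the adapter sets the multi-WQ capability bit. */
    static bool vxlan_wq_config_ok(unsigned int wq_count, unsigned char caps)
    {
        return wq_count == 1 || (caps & VXLAN_MULTI_WQ);
    }
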
@@ -238,9 +251,8 @@ static void enic_udp_tunnel_del(struct net_device *netdev,
spin_lock_bh(&enic->devcmd_lock);
- if ((ti->sa_family != AF_INET) ||
- ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number)) ||
- (ti->type != UDP_TUNNEL_TYPE_VXLAN)) {
+ if ((ntohs(ti->port) != enic->vxlan.vxlan_udp_port_number) ||
+ ti->type != UDP_TUNNEL_TYPE_VXLAN) {
netdev_info(netdev, "udp_tnl: port:%d, sa_family: %d, type: %d not offloaded",
ntohs(ti->port), ti->sa_family, ti->type);
goto unlock;
@@ -271,22 +283,37 @@ static netdev_features_t enic_features_check(struct sk_buff *skb,
struct enic *enic = netdev_priv(dev);
struct udphdr *udph;
u16 port = 0;
- u16 proto;
+ u8 proto;
if (!skb->encapsulation)
return features;
features = vxlan_features_check(skb, features);
- /* hardware only supports IPv4 vxlan tunnel */
- if (vlan_get_protocol(skb) != htons(ETH_P_IP))
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IPV6):
+ if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
+ goto out;
+ proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ case htons(ETH_P_IP):
+ proto = ip_hdr(skb)->protocol;
+ break;
+ default:
goto out;
+ }
- /* hardware does not support offload of ipv6 inner pkt */
- if (eth->h_proto != ntohs(ETH_P_IP))
+ switch (eth->h_proto) {
+ case ntohs(ETH_P_IPV6):
+ if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
+ goto out;
+ /* Fall through */
+ case ntohs(ETH_P_IP):
+ break;
+ default:
goto out;
+ }
- proto = ip_hdr(skb)->protocol;
if (proto == IPPROTO_UDP) {
udph = udp_hdr(skb);
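
enic_features_check() now takes the outer L4 protocol from whichever header the outer ethertype names before it insists on UDP. A userspace sketch of that dispatch using the standard netinet headers (the function name is illustrative, not the driver's):

    #include <stdint.h>
    #include <arpa/inet.h>
    #include <netinet/ip.h>
    #include <netinet/ip6.h>

    /* Return the outer L4 protocol number, or -1 when the ethertype is
     * neither IPv4 nor IPv6 and the packet cannot be offloaded. */
    static int outer_l4_proto(uint16_t ethertype_be, const void *l3hdr)
    {
        switch (ntohs(ethertype_be)) {
        case 0x0800:    /* ETH_P_IP */
            return ((const struct iphdr *)l3hdr)->protocol;
        case 0x86DD:    /* ETH_P_IPV6 */
            return ((const struct ip6_hdr *)l3hdr)->ip6_nxt;
        default:
            return -1;
        }
    }
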
@@ -635,12 +662,25 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
{
- if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
+
+ switch (eth->h_proto) {
+ case ntohs(ETH_P_IP):
inner_ip_hdr(skb)->check = 0;
inner_tcp_hdr(skb)->check =
~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
inner_ip_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
+ break;
+ case ntohs(ETH_P_IPV6):
+ inner_tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ break;
+ default:
+ WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload");
+ break;
}
}
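
csum_tcpudp_magic() and csum_ipv6_magic() are kernel helpers returning the folded, complemented pseudo-header sum, so storing their complement preloads the plain folded sum that the NIC extends with the TCP length and payload. A userspace sketch of the IPv4 arithmetic (helper names are illustrative):

    #include <stdint.h>
    #include <arpa/inet.h>

    /* Fold a 32-bit ones'-complement accumulator down to 16 bits. */
    static uint16_t fold16(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* Equivalent of ~csum_tcpudp_magic(saddr, daddr, 0, IPPROTO_TCP, 0):
     * the non-complemented folded sum of saddr, daddr and the protocol.
     * The result is host order; htons() it before writing the wire field. */
    static uint16_t tcp_pseudo_seed_v4(uint32_t saddr_be, uint32_t daddr_be)
    {
        uint32_t s = ntohl(saddr_be), d = ntohl(daddr_be);

        return fold16((s >> 16) + (s & 0xffff) +
                      (d >> 16) + (d & 0xffff) + 6 /* IPPROTO_TCP */);
    }
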
@@ -1898,6 +1938,8 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* enable rq before updating rq desc */
+ vnic_rq_enable(&enic->rq[i]);
vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
@@ -1909,8 +1951,6 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_enable(&enic->wq[i]);
- for (i = 0; i < enic->rq_count; i++)
- vnic_rq_enable(&enic->rq[i]);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
@@ -1936,8 +1976,12 @@ static int enic_open(struct net_device *netdev)
return 0;
err_out_free_rq:
- for (i = 0; i < enic->rq_count; i++)
+ for (i = 0; i < enic->rq_count; i++) {
+ err = vnic_rq_disable(&enic->rq[i]);
+ if (err)
+ return err;
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ }
enic_dev_notify_unset(enic);
err_out_free_intr:
enic_unset_affinity_hint(enic);
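
enic_open() now enables each RQ before posting buffers to it, and the error path disables an RQ before cleaning it, since reclaiming buffers from a still-armed queue would race with hardware. A stubbed sketch of that ordering (all helpers here are hypothetical stand-ins for the vnic_rq_* calls):

    #include <stdio.h>

    /* Hypothetical stubs; a real driver would program the hardware here. */
    static void rq_enable(int i)  { printf("enable rq%d\n", i); }
    static int  rq_fill(int i)    { printf("fill rq%d\n", i); return i == 1; /* demo: rq1 fails */ }
    static int  rq_disable(int i) { printf("disable rq%d\n", i); return 0; }
    static void rq_clean(int i)   { printf("clean rq%d\n", i); }

    static int open_rqs(int count)
    {
        int i;

        for (i = 0; i < count; i++) {
            rq_enable(i);           /* enable before posting descriptors */
            if (rq_fill(i))
                goto err;           /* nothing posted: unwind every RQ */
        }
        return 0;
    err:
        for (i = 0; i < count; i++) {
            if (rq_disable(i))      /* quiesce the queue first... */
                return -1;
            rq_clean(i);            /* ...then freeing buffers is safe */
        }
        return -1;
    }

    int main(void)
    {
        return open_rqs(2) ? 1 : 0;
    }
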
@@ -2151,9 +2195,10 @@ static int enic_dev_wait(struct vnic_dev *vdev,
static int enic_dev_open(struct enic *enic)
{
int err;
+ u32 flags = CMD_OPENF_IG_DESCCACHE;
err = enic_dev_wait(enic->vdev, vnic_dev_open,
- vnic_dev_open_done, 0);
+ vnic_dev_open_done, flags);
if (err)
dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
err);
@@ -2275,7 +2320,7 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
{
struct device *dev = enic_get_dev(enic);
const u8 rss_default_cpu = 0;
- const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
+ u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
NIC_CFG_RSS_HASH_TYPE_IPV6 |
NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -2283,6 +2328,8 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
const u8 rss_base_cpu = 0;
u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
+ if (vnic_dev_capable_udp_rss(enic->vdev))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP;
if (rss_enable) {
if (!enic_set_rsskey(enic)) {
if (enic_set_rsscpu(enic, rss_hash_bits)) {
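
With UDP RSS the hash-type mask stops being a compile-time constant: the IPv4/IPv6 and TCP types stay always on, and the UDP type is ORed in only when the device reports the capability. A sketch with assumed bit values (the firmware's NIC_CFG_RSS_HASH_TYPE_* encodings are not reproduced here):

    #include <stdbool.h>
    #include <stdint.h>

    enum {                          /* assumed bit layout, for illustration */
        RSS_HASH_IPV4     = 1 << 0,
        RSS_HASH_TCP_IPV4 = 1 << 1,
        RSS_HASH_IPV6     = 1 << 2,
        RSS_HASH_TCP_IPV6 = 1 << 3,
        RSS_HASH_UDP      = 1 << 4,
    };

    static uint8_t rss_hash_type(bool udp_rss_capable)
    {
        uint8_t type = RSS_HASH_IPV4 | RSS_HASH_TCP_IPV4 |
                       RSS_HASH_IPV6 | RSS_HASH_TCP_IPV6;

        if (udp_rss_capable)
            type |= RSS_HASH_UDP;   /* only when the vNIC supports it */
        return type;
    }
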
@@ -2901,9 +2948,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXCSUM;
if (ENIC_SETTING(enic, VXLAN)) {
u64 patch_level;
+ u64 a1 = 0;
netdev->hw_enc_features |= NETIF_F_RXCSUM |
NETIF_F_TSO |
+ NETIF_F_TSO6 |
NETIF_F_TSO_ECN |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_HW_CSUM |
@@ -2922,9 +2971,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
err = vnic_dev_get_supported_feature_ver(enic->vdev,
VIC_FEATURE_VXLAN,
- &patch_level);
+ &patch_level, &a1);
if (err)
patch_level = 0;
+ enic->vxlan.flags = (u8)a1;
/* mask bits that are supported by driver
*/
patch_level &= BIT_ULL(0) | BIT_ULL(2);
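
vnic_dev_get_supported_feature_ver() now fills a second result register, and the driver keeps its low byte as the VXLAN capability flags while masking the patch levels it actually implements (bits 0 and 2). A sketch of that unpacking (the struct and names are illustrative):

    #include <stdint.h>

    struct vxlan_caps {
        uint64_t patch_level;   /* feature patch levels the driver handles */
        uint8_t  flags;         /* e.g. outer-IPv6, inner-IPv6, multi-WQ bits */
    };

    static struct vxlan_caps parse_vxlan_feature(uint64_t patch_reg, uint64_t a1)
    {
        struct vxlan_caps c;

        c.flags = (uint8_t)a1;  /* low byte carries the capability flags */
        c.patch_level = patch_reg & ((1ULL << 0) | (1ULL << 2));
        return c;
    }
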