Diffstat (limited to 'drivers/net/ethernet/netronome')
 drivers/net/ethernet/netronome/nfp/nfp_net.h               |  36
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c        | 183
 drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h          |  15
 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c       | 402
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h       |   6
 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c   |  90
 6 files changed, 728 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 939cfce15830..bd0e26524417 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -621,6 +621,9 @@ struct nfp_net_dp {
* @mbox_amsg.lock: Protect message list
* @mbox_amsg.list: List of message to process
* @mbox_amsg.work: Work to process message asynchronously
+ * @fs: Flow steering
+ * @fs.count: Flow count
+ * @fs.list: List of flows
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -728,9 +731,39 @@ struct nfp_net {
struct work_struct work;
} mbox_amsg;
+ struct {
+ u16 count;
+ struct list_head list;
+ } fs;
+
void *app_priv;
};
+struct nfp_fs_entry {
+ struct list_head node;
+ u32 flow_type;
+ u32 loc;
+ struct {
+ union {
+ struct {
+ __be32 sip4;
+ __be32 dip4;
+ };
+ struct {
+ __be32 sip6[4];
+ __be32 dip6[4];
+ };
+ };
+ union {
+ __be16 l3_proto;
+ u8 l4_proto;
+ };
+ __be16 sport;
+ __be16 dport;
+ } key, msk;
+ u64 action;
+};
+
struct nfp_mbox_amsg_entry {
struct list_head list;
int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
@@ -987,6 +1020,9 @@ struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
struct netlink_ext_ack *extack);
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry);
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry);
+
#ifdef CONFIG_NFP_DEBUG
void nfp_net_debugfs_create(void);
void nfp_net_debugfs_destroy(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index de0a5d5ded30..ac1f4514b1d0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1763,6 +1763,186 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
}
+static void
+nfp_net_fs_fill_v4(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+ unsigned int i;
+
+ union {
+ struct {
+ __be16 loc;
+ u8 k_proto, m_proto;
+ __be32 k_sip, m_sip, k_dip, m_dip;
+ __be16 k_sport, m_sport, k_dport, m_dport;
+ };
+ __be32 val[7];
+ } v4_rule;
+
+ nn_writel(nn, *addr, op);
+ *addr += sizeof(u32);
+
+ v4_rule.loc = cpu_to_be16(entry->loc);
+ v4_rule.k_proto = entry->key.l4_proto;
+ v4_rule.m_proto = entry->msk.l4_proto;
+ v4_rule.k_sip = entry->key.sip4;
+ v4_rule.m_sip = entry->msk.sip4;
+ v4_rule.k_dip = entry->key.dip4;
+ v4_rule.m_dip = entry->msk.dip4;
+ v4_rule.k_sport = entry->key.sport;
+ v4_rule.m_sport = entry->msk.sport;
+ v4_rule.k_dport = entry->key.dport;
+ v4_rule.m_dport = entry->msk.dport;
+
+ for (i = 0; i < ARRAY_SIZE(v4_rule.val); i++, *addr += sizeof(__be32))
+ nn_writel(nn, *addr, be32_to_cpu(v4_rule.val[i]));
+}
+
+static void
+nfp_net_fs_fill_v6(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 op, u32 *addr)
+{
+ unsigned int i;
+
+ union {
+ struct {
+ __be16 loc;
+ u8 k_proto, m_proto;
+ __be32 k_sip[4], m_sip[4], k_dip[4], m_dip[4];
+ __be16 k_sport, m_sport, k_dport, m_dport;
+ };
+ __be32 val[19];
+ } v6_rule;
+
+ nn_writel(nn, *addr, op);
+ *addr += sizeof(u32);
+
+ v6_rule.loc = cpu_to_be16(entry->loc);
+ v6_rule.k_proto = entry->key.l4_proto;
+ v6_rule.m_proto = entry->msk.l4_proto;
+ for (i = 0; i < 4; i++) {
+ v6_rule.k_sip[i] = entry->key.sip6[i];
+ v6_rule.m_sip[i] = entry->msk.sip6[i];
+ v6_rule.k_dip[i] = entry->key.dip6[i];
+ v6_rule.m_dip[i] = entry->msk.dip6[i];
+ }
+ v6_rule.k_sport = entry->key.sport;
+ v6_rule.m_sport = entry->msk.sport;
+ v6_rule.k_dport = entry->key.dport;
+ v6_rule.m_dport = entry->msk.dport;
+
+ for (i = 0; i < ARRAY_SIZE(v6_rule.val); i++, *addr += sizeof(__be32))
+ nn_writel(nn, *addr, be32_to_cpu(v6_rule.val[i]));
+}
+
+#define NFP_FS_QUEUE_ID GENMASK(22, 16)
+#define NFP_FS_ACT GENMASK(15, 0)
+#define NFP_FS_ACT_DROP BIT(0)
+#define NFP_FS_ACT_Q BIT(1)
+static void
+nfp_net_fs_fill_act(struct nfp_net *nn, struct nfp_fs_entry *entry, u32 addr)
+{
+ u32 action = 0; /* 0 means default passthrough */
+
+ if (entry->action == RX_CLS_FLOW_DISC)
+ action = NFP_FS_ACT_DROP;
+ else if (!(entry->flow_type & FLOW_RSS))
+ action = FIELD_PREP(NFP_FS_QUEUE_ID, entry->action) | NFP_FS_ACT_Q;
+
+ nn_writel(nn, addr, action);
+}
+
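The action word written by nfp_net_fs_fill_act() packs the destination queue into bits 22:16 and the action flags into bits 15:0; a value of 0 leaves the packet on the default path, which is also what the driver writes for FLOW_RSS rules so the packet still goes through RSS. A minimal standalone sketch of the same encoding, using plain shifts instead of the kernel's FIELD_PREP(); the *_SHIFT name is invented for the sketch, the flag names mirror the defines above, and the queue number is only an example:

#include <stdint.h>
#include <stdio.h>

#define NFP_FS_QUEUE_ID_SHIFT   16              /* bits 22:16 */
#define NFP_FS_ACT_DROP         (1u << 0)
#define NFP_FS_ACT_Q            (1u << 1)

static uint32_t fs_action_word(int drop, unsigned int queue)
{
        if (drop)
                return NFP_FS_ACT_DROP;
        /* redirect: queue id in bits 22:16, "queue valid" flag in bit 1 */
        return (queue << NFP_FS_QUEUE_ID_SHIFT) | NFP_FS_ACT_Q;
}

int main(void)
{
        printf("drop    -> 0x%08x\n", fs_action_word(1, 0)); /* 0x00000001 */
        printf("queue 5 -> 0x%08x\n", fs_action_word(0, 5)); /* 0x00050002 */
        return 0;
}
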
+int nfp_net_fs_add_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+ u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+ if (err)
+ return err;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_USER_FLOW:
+ nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V4, &addr);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_ADD_V6, &addr);
+ break;
+ case ETHER_FLOW:
+ nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE);
+ addr += sizeof(u32);
+ nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+ addr += sizeof(u32);
+ break;
+ }
+
+ nfp_net_fs_fill_act(nn, entry, addr);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+ if (err) {
+ nn_err(nn, "Add new fs rule failed with %d\n", err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int nfp_net_fs_del_hw(struct nfp_net *nn, struct nfp_fs_entry *entry)
+{
+ u32 addr = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_NET_CFG_FS_SZ);
+ if (err)
+ return err;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_USER_FLOW:
+ nfp_net_fs_fill_v4(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V4, &addr);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ nfp_net_fs_fill_v6(nn, entry, NFP_NET_CFG_MBOX_CMD_FS_DEL_V6, &addr);
+ break;
+ case ETHER_FLOW:
+ nn_writel(nn, addr, NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE);
+ addr += sizeof(u32);
+ nn_writew(nn, addr, be16_to_cpu(entry->key.l3_proto));
+ addr += sizeof(u32);
+ break;
+ }
+
+ nfp_net_fs_fill_act(nn, entry, addr);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, NFP_NET_CFG_MBOX_CMD_FLOW_STEER);
+ if (err) {
+ nn_err(nn, "Delete fs rule failed with %d\n", err);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void nfp_net_fs_clean(struct nfp_net *nn)
+{
+ struct nfp_fs_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &nn->fs.list, node) {
+ nfp_net_fs_del_hw(nn, entry);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
static void nfp_net_stat64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -2740,6 +2920,8 @@ int nfp_net_init(struct nfp_net *nn)
INIT_LIST_HEAD(&nn->mbox_amsg.list);
INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
+ INIT_LIST_HEAD(&nn->fs.list);
+
return register_netdev(nn->dp.netdev);
err_clean_mbox:
@@ -2759,6 +2941,7 @@ void nfp_net_clean(struct nfp_net *nn)
unregister_netdev(nn->dp.netdev);
nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
+ nfp_net_fs_clean(nn);
flush_work(&nn->mbox_amsg.work);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 3e63f6d6a563..eaf4d3c499d1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -269,6 +269,7 @@
#define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */
#define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */
#define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */
+#define NFP_NET_CFG_CTRL_FLOW_STEER (0x1 << 8) /* Flow steering */
#define NFP_NET_CFG_CAP_WORD1 0x00a4
@@ -418,6 +419,8 @@
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9
+#define NFP_NET_CFG_MBOX_CMD_FLOW_STEER 10
+
/* VLAN filtering using general use mailbox
* %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
@@ -440,6 +443,18 @@
#define NFP_NET_CFG_MULTICAST_MAC_LO (NFP_NET_CFG_MULTICAST + 6)
#define NFP_NET_CFG_MULTICAST_SZ 0x0006
+/* Max size of FS rules in bytes */
+#define NFP_NET_CFG_FS_SZ 0x0054
+/* Sub commands for FS */
+enum {
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_V4,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_V4,
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_V6,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_V6,
+ NFP_NET_CFG_MBOX_CMD_FS_ADD_ETHTYPE,
+ NFP_NET_CFG_MBOX_CMD_FS_DEL_ETHTYPE,
+};
+
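The enum above selects the per-family sub-command that nfp_net_fs_add_hw()/nfp_net_fs_del_hw() write at tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL before issuing the top-level NFP_NET_CFG_MBOX_CMD_FLOW_STEER command. The largest message is the IPv6 one: a 32-bit sub-command, nineteen 32-bit rule words and a 32-bit action word, i.e. 4 + 19 * 4 + 4 = 84 bytes, which is where NFP_NET_CFG_FS_SZ (0x54) comes from. A hedged host-side sketch of that layout; the struct is illustrative only, the driver itself writes the words one by one with nn_writel():

#include <stdint.h>

struct nfp_fs_msg_v6 {
        uint32_t subcmd;        /* e.g. NFP_NET_CFG_MBOX_CMD_FS_ADD_V6 */
        uint32_t rule[19];      /* loc/protos, key+mask IPv6 addresses, ports */
        uint32_t action;        /* drop / queue word, see NFP_FS_ACT_* */
};

/* 4 + 19 * 4 + 4 == 0x54 == NFP_NET_CFG_FS_SZ */
_Static_assert(sizeof(struct nfp_fs_msg_v6) == 0x54, "matches NFP_NET_CFG_FS_SZ");
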
/* TLV capabilities
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index e75cbb287625..200b3588363c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -1317,6 +1317,116 @@ static int nfp_net_get_rss_hash_opts(struct nfp_net *nn,
return 0;
}
+#define NFP_FS_MAX_ENTRY 1024
+
+static int nfp_net_fs_to_ethtool(struct nfp_fs_entry *entry, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ unsigned int i;
+
+ switch (entry->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fs->h_u.tcp_ip4_spec.ip4src = entry->key.sip4;
+ fs->h_u.tcp_ip4_spec.ip4dst = entry->key.dip4;
+ fs->h_u.tcp_ip4_spec.psrc = entry->key.sport;
+ fs->h_u.tcp_ip4_spec.pdst = entry->key.dport;
+ fs->m_u.tcp_ip4_spec.ip4src = entry->msk.sip4;
+ fs->m_u.tcp_ip4_spec.ip4dst = entry->msk.dip4;
+ fs->m_u.tcp_ip4_spec.psrc = entry->msk.sport;
+ fs->m_u.tcp_ip4_spec.pdst = entry->msk.dport;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ for (i = 0; i < 4; i++) {
+ fs->h_u.tcp_ip6_spec.ip6src[i] = entry->key.sip6[i];
+ fs->h_u.tcp_ip6_spec.ip6dst[i] = entry->key.dip6[i];
+ fs->m_u.tcp_ip6_spec.ip6src[i] = entry->msk.sip6[i];
+ fs->m_u.tcp_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
+ }
+ fs->h_u.tcp_ip6_spec.psrc = entry->key.sport;
+ fs->h_u.tcp_ip6_spec.pdst = entry->key.dport;
+ fs->m_u.tcp_ip6_spec.psrc = entry->msk.sport;
+ fs->m_u.tcp_ip6_spec.pdst = entry->msk.dport;
+ break;
+ case IPV4_USER_FLOW:
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fs->h_u.usr_ip4_spec.ip4src = entry->key.sip4;
+ fs->h_u.usr_ip4_spec.ip4dst = entry->key.dip4;
+ fs->h_u.usr_ip4_spec.proto = entry->key.l4_proto;
+ fs->m_u.usr_ip4_spec.ip4src = entry->msk.sip4;
+ fs->m_u.usr_ip4_spec.ip4dst = entry->msk.dip4;
+ fs->m_u.usr_ip4_spec.proto = entry->msk.l4_proto;
+ break;
+ case IPV6_USER_FLOW:
+ for (i = 0; i < 4; i++) {
+ fs->h_u.usr_ip6_spec.ip6src[i] = entry->key.sip6[i];
+ fs->h_u.usr_ip6_spec.ip6dst[i] = entry->key.dip6[i];
+ fs->m_u.usr_ip6_spec.ip6src[i] = entry->msk.sip6[i];
+ fs->m_u.usr_ip6_spec.ip6dst[i] = entry->msk.dip6[i];
+ }
+ fs->h_u.usr_ip6_spec.l4_proto = entry->key.l4_proto;
+ fs->m_u.usr_ip6_spec.l4_proto = entry->msk.l4_proto;
+ break;
+ case ETHER_FLOW:
+ fs->h_u.ether_spec.h_proto = entry->key.l3_proto;
+ fs->m_u.ether_spec.h_proto = entry->msk.l3_proto;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fs->flow_type = entry->flow_type;
+ fs->ring_cookie = entry->action;
+
+ if (fs->flow_type & FLOW_RSS) {
+ /* Only rss_context of 0 is supported. */
+ cmd->rss_context = 0;
+ /* RSS is used, mask the ring. */
+ fs->ring_cookie |= ETHTOOL_RX_FLOW_SPEC_RING;
+ }
+
+ return 0;
+}
+
+static int nfp_net_get_fs_rule(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct nfp_fs_entry *entry;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ if (cmd->fs.location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == cmd->fs.location)
+ return nfp_net_fs_to_ethtool(entry, cmd);
+
+ if (entry->loc > cmd->fs.location)
+ /* no need to continue */
+ return -ENOENT;
+ }
+
+ return -ENOENT;
+}
+
+static int nfp_net_get_fs_loc(struct nfp_net *nn, u32 *rule_locs)
+{
+ struct nfp_fs_entry *entry;
+ u32 count = 0;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(entry, &nn->fs.list, node)
+ rule_locs[count++] = entry->loc;
+
+ return 0;
+}
+
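With the ETHTOOL_GRXCLSRLCNT / ETHTOOL_GRXCLSRULE handling below, the installed rules become visible to `ethtool -n <dev>`. A hypothetical userspace sketch of the same queries through the SIOCETHTOOL ioctl; the interface name and the rule location are assumptions for illustration:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXCLSRLCNT };
        struct ifreq ifr = { 0 };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
        ifr.ifr_data = (void *)&nfc;

        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("%u steering rules installed\n", nfc.rule_cnt);

        /* fetch the rule stored at location 0, if any */
        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_GRXCLSRULE;
        nfc.fs.location = 0;
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("rule 0: flow_type 0x%x ring_cookie %llu\n",
                       nfc.fs.flow_type,
                       (unsigned long long)nfc.fs.ring_cookie);
        return 0;
}
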
static int nfp_net_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
@@ -1326,6 +1436,14 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
cmd->data = nn->dp.num_rx_rings;
return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = nn->fs.count;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return nfp_net_get_fs_rule(nn, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ cmd->data = NFP_FS_MAX_ENTRY;
+ return nfp_net_get_fs_loc(nn, rule_locs);
case ETHTOOL_GRXFH:
return nfp_net_get_rss_hash_opts(nn, cmd);
default:
@@ -1385,6 +1503,253 @@ static int nfp_net_set_rss_hash_opt(struct nfp_net *nn,
return 0;
}
+static int nfp_net_fs_from_ethtool(struct nfp_fs_entry *entry, struct ethtool_rx_flow_spec *fs)
+{
+ unsigned int i;
+
+ /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ entry->msk.sip4 = fs->m_u.tcp_ip4_spec.ip4src;
+ entry->msk.dip4 = fs->m_u.tcp_ip4_spec.ip4dst;
+ entry->msk.sport = fs->m_u.tcp_ip4_spec.psrc;
+ entry->msk.dport = fs->m_u.tcp_ip4_spec.pdst;
+ entry->key.sip4 = fs->h_u.tcp_ip4_spec.ip4src & entry->msk.sip4;
+ entry->key.dip4 = fs->h_u.tcp_ip4_spec.ip4dst & entry->msk.dip4;
+ entry->key.sport = fs->h_u.tcp_ip4_spec.psrc & entry->msk.sport;
+ entry->key.dport = fs->h_u.tcp_ip4_spec.pdst & entry->msk.dport;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ for (i = 0; i < 4; i++) {
+ entry->msk.sip6[i] = fs->m_u.tcp_ip6_spec.ip6src[i];
+ entry->msk.dip6[i] = fs->m_u.tcp_ip6_spec.ip6dst[i];
+ entry->key.sip6[i] = fs->h_u.tcp_ip6_spec.ip6src[i] & entry->msk.sip6[i];
+ entry->key.dip6[i] = fs->h_u.tcp_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
+ }
+ entry->msk.sport = fs->m_u.tcp_ip6_spec.psrc;
+ entry->msk.dport = fs->m_u.tcp_ip6_spec.pdst;
+ entry->key.sport = fs->h_u.tcp_ip6_spec.psrc & entry->msk.sport;
+ entry->key.dport = fs->h_u.tcp_ip6_spec.pdst & entry->msk.dport;
+ break;
+ case IPV4_USER_FLOW:
+ entry->msk.sip4 = fs->m_u.usr_ip4_spec.ip4src;
+ entry->msk.dip4 = fs->m_u.usr_ip4_spec.ip4dst;
+ entry->msk.l4_proto = fs->m_u.usr_ip4_spec.proto;
+ entry->key.sip4 = fs->h_u.usr_ip4_spec.ip4src & entry->msk.sip4;
+ entry->key.dip4 = fs->h_u.usr_ip4_spec.ip4dst & entry->msk.dip4;
+ entry->key.l4_proto = fs->h_u.usr_ip4_spec.proto & entry->msk.l4_proto;
+ break;
+ case IPV6_USER_FLOW:
+ for (i = 0; i < 4; i++) {
+ entry->msk.sip6[i] = fs->m_u.usr_ip6_spec.ip6src[i];
+ entry->msk.dip6[i] = fs->m_u.usr_ip6_spec.ip6dst[i];
+ entry->key.sip6[i] = fs->h_u.usr_ip6_spec.ip6src[i] & entry->msk.sip6[i];
+ entry->key.dip6[i] = fs->h_u.usr_ip6_spec.ip6dst[i] & entry->msk.dip6[i];
+ }
+ entry->msk.l4_proto = fs->m_u.usr_ip6_spec.l4_proto;
+ entry->key.l4_proto = fs->h_u.usr_ip6_spec.l4_proto & entry->msk.l4_proto;
+ break;
+ case ETHER_FLOW:
+ entry->msk.l3_proto = fs->m_u.ether_spec.h_proto;
+ entry->key.l3_proto = fs->h_u.ether_spec.h_proto & entry->msk.l3_proto;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_TCP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_UDP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ entry->key.l4_proto = IPPROTO_SCTP;
+ entry->msk.l4_proto = 0xff;
+ break;
+ }
+
+ entry->flow_type = fs->flow_type;
+ entry->action = fs->ring_cookie;
+ entry->loc = fs->location;
+
+ return 0;
+}
+
+static int nfp_net_fs_check_existing(struct nfp_net *nn, struct nfp_fs_entry *new)
+{
+ struct nfp_fs_entry *entry;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (new->loc != entry->loc &&
+ !((new->flow_type ^ entry->flow_type) & ~FLOW_RSS) &&
+ !memcmp(&new->key, &entry->key, sizeof(new->key)) &&
+ !memcmp(&new->msk, &entry->msk, sizeof(new->msk)))
+ return entry->loc;
+ }
+
+ /* -1 means no duplicates */
+ return -1;
+}
+
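nfp_net_fs_check_existing() can compare whole key/msk blocks with memcmp() because every entry is kzalloc()ed (padding bytes start out zero) and nfp_net_fs_from_ethtool() stores keys pre-masked, so two rules matching the same traffic produce byte-identical blocks. A small standalone illustration of that property; the struct is a stand-in, not the driver's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_key {
        unsigned int   sip4;
        unsigned char  l4_proto;        /* a padding byte follows before sport */
        unsigned short sport;
};

int main(void)
{
        /* calloc() plays the role of kzalloc(): padding bytes are zeroed */
        struct demo_key *a = calloc(1, sizeof(*a));
        struct demo_key *b = calloc(1, sizeof(*b));

        a->sip4 = b->sip4 = 0xc0a80101;         /* 192.168.1.1 */
        a->l4_proto = b->l4_proto = 6;          /* TCP */
        a->sport = b->sport = 80;

        printf("identical: %d\n", !memcmp(a, b, sizeof(*a)));
        free(a);
        free(b);
        return 0;
}
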
+static int nfp_net_fs_add(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+ struct nfp_fs_entry *new, *entry;
+ bool unsupp_mask;
+ int err, id;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ /* Only default RSS context(0) is supported. */
+ if ((fs->flow_type & FLOW_RSS) && cmd->rss_context)
+ return -EOPNOTSUPP;
+
+ if (fs->location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
+ fs->ring_cookie >= nn->dp.num_rx_rings)
+ return -EINVAL;
+
+ /* FLOW_EXT/FLOW_MAC_EXT is not supported. */
+ switch (fs->flow_type & ~FLOW_RSS) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ unsupp_mask = !!fs->m_u.tcp_ip4_spec.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ unsupp_mask = !!fs->m_u.tcp_ip6_spec.tclass;
+ break;
+ case IPV4_USER_FLOW:
+ unsupp_mask = !!fs->m_u.usr_ip4_spec.l4_4_bytes ||
+ !!fs->m_u.usr_ip4_spec.tos ||
+ !!fs->m_u.usr_ip4_spec.ip_ver;
+ /* ip_ver must be ETH_RX_NFC_IP4. */
+ unsupp_mask |= fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4;
+ break;
+ case IPV6_USER_FLOW:
+ unsupp_mask = !!fs->m_u.usr_ip6_spec.l4_4_bytes ||
+ !!fs->m_u.usr_ip6_spec.tclass;
+ break;
+ case ETHER_FLOW:
+ if (fs->h_u.ether_spec.h_proto == htons(ETH_P_IP) ||
+ fs->h_u.ether_spec.h_proto == htons(ETH_P_IPV6)) {
+ nn_err(nn, "Please use ip4/ip6 flow type instead.\n");
+ return -EOPNOTSUPP;
+ }
+ /* Only unmasked ethtype is supported. */
+ unsupp_mask = !is_zero_ether_addr(fs->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fs->m_u.ether_spec.h_source) ||
+ (fs->m_u.ether_spec.h_proto != htons(0xffff));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (unsupp_mask)
+ return -EOPNOTSUPP;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ nfp_net_fs_from_ethtool(new, fs);
+
+ id = nfp_net_fs_check_existing(nn, new);
+ if (id >= 0) {
+ nn_err(nn, "Identical rule is existing in %d.\n", id);
+ err = -EINVAL;
+ goto err;
+ }
+
+ /* Insert to list in ascending order of location. */
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == fs->location) {
+ err = nfp_net_fs_del_hw(nn, entry);
+ if (err)
+ goto err;
+
+ nn->fs.count--;
+ err = nfp_net_fs_add_hw(nn, new);
+ if (err)
+ goto err;
+
+ nn->fs.count++;
+ list_replace(&entry->node, &new->node);
+ kfree(entry);
+
+ return 0;
+ }
+
+ if (entry->loc > fs->location)
+ break;
+ }
+
+ if (nn->fs.count == NFP_FS_MAX_ENTRY) {
+ err = -ENOSPC;
+ goto err;
+ }
+
+ err = nfp_net_fs_add_hw(nn, new);
+ if (err)
+ goto err;
+
+ list_add_tail(&new->node, &entry->node);
+ nn->fs.count++;
+
+ return 0;
+
+err:
+ kfree(new);
+ return err;
+}
+
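With ETHTOOL_SRXCLSRLINS wired up, a command such as `ethtool -N <dev> flow-type tcp4 dst-ip 192.168.1.10 dst-port 80 action 2 loc 0` ends up in nfp_net_fs_add(). A hypothetical userspace sketch of the same insertion through SIOCETHTOOL; the interface name, address and queue are assumptions. Deletion works the same way with ETHTOOL_SRXCLSRLDEL and only fs.location filled in, which lands in nfp_net_fs_del() below:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
        struct ifreq ifr = { 0 };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */

        nfc.fs.flow_type = TCP_V4_FLOW;
        nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.168.1.10");
        nfc.fs.m_u.tcp_ip4_spec.ip4dst = htonl(0xffffffff);    /* exact match */
        nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
        nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);
        nfc.fs.ring_cookie = 2;         /* steer to RX queue 2 */
        nfc.fs.location = 0;            /* slot 0, must be < NFP_FS_MAX_ENTRY */

        ifr.ifr_data = (void *)&nfc;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_SRXCLSRLINS");
        return 0;
}
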
+static int nfp_net_fs_del(struct nfp_net *nn, struct ethtool_rxnfc *cmd)
+{
+ struct nfp_fs_entry *entry;
+ int err;
+
+ if (!(nn->cap_w1 & NFP_NET_CFG_CTRL_FLOW_STEER))
+ return -EOPNOTSUPP;
+
+ if (!nn->fs.count || cmd->fs.location >= NFP_FS_MAX_ENTRY)
+ return -EINVAL;
+
+ list_for_each_entry(entry, &nn->fs.list, node) {
+ if (entry->loc == cmd->fs.location) {
+ err = nfp_net_fs_del_hw(nn, entry);
+ if (err)
+ return err;
+
+ list_del(&entry->node);
+ kfree(entry);
+ nn->fs.count--;
+
+ return 0;
+ } else if (entry->loc > cmd->fs.location) {
+ /* no need to continue */
+ break;
+ }
+ }
+
+ return -ENOENT;
+}
+
static int nfp_net_set_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd)
{
@@ -1393,6 +1758,10 @@ static int nfp_net_set_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
return nfp_net_set_rss_hash_opt(nn, cmd);
+ case ETHTOOL_SRXCLSRLINS:
+ return nfp_net_fs_add(nn, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return nfp_net_fs_del(nn, cmd);
default:
return -EOPNOTSUPP;
}
@@ -1866,6 +2235,30 @@ static int nfp_net_set_channels(struct net_device *netdev,
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
+static int nfp_port_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg != AUTONEG_DISABLE)
+ return -EOPNOTSUPP;
+
+ err = nfp_eth_set_pauseparam(port->app->cpp, eth_port->index,
+ pause->tx_pause, pause->rx_pause);
+ if (!err)
+ /* Only refresh if we did something */
+ nfp_net_refresh_port_table(port);
+
+ return err < 0 ? err : 0;
+}
+
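nfp_port_set_pauseparam() backs `ethtool -A <dev> rx on|off tx on|off`; autoneg must stay disabled, matching what nfp_port_get_pauseparam() now reports from the port table. A hypothetical userspace sketch of the same request via SIOCETHTOOL; the interface name and the chosen on/off values are assumptions:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_pauseparam pp = {
                .cmd = ETHTOOL_SPAUSEPARAM,
                .autoneg = AUTONEG_DISABLE,     /* anything else is rejected above */
                .rx_pause = 1,
                .tx_pause = 0,
        };
        struct ifreq ifr = { 0 };
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed name */
        ifr.ifr_data = (void *)&pp;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_SPAUSEPARAM");
        return 0;
}
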
static void nfp_port_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -1877,10 +2270,10 @@ static void nfp_port_get_pauseparam(struct net_device *netdev,
if (!eth_port)
return;
- /* Currently pause frame support is fixed */
+ /* Currently pause frame autoneg is fixed */
pause->autoneg = AUTONEG_DISABLE;
- pause->rx_pause = 1;
- pause->tx_pause = 1;
+ pause->rx_pause = eth_port->rx_pause;
+ pause->tx_pause = eth_port->tx_pause;
}
static int nfp_net_set_phys_id(struct net_device *netdev,
@@ -2106,8 +2499,10 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .set_pauseparam = nfp_port_set_pauseparam,
.get_pauseparam = nfp_port_get_pauseparam,
.set_phys_id = nfp_net_set_phys_id,
+ .get_ts_info = ethtool_op_get_ts_info,
};
const struct ethtool_ops nfp_port_ethtool_ops = {
@@ -2130,6 +2525,7 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
.set_fecparam = nfp_port_set_fecparam,
+ .set_pauseparam = nfp_port_set_pauseparam,
.get_pauseparam = nfp_port_get_pauseparam,
.set_phys_id = nfp_net_set_phys_id,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 00264af13b49..dc0e405c1349 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -189,6 +189,8 @@ enum nfp_ethtool_link_mode_list {
* @ports.enabled: is enabled?
* @ports.tx_enabled: is TX enabled?
* @ports.rx_enabled: is RX enabled?
+ * @ports.rx_pause: is RX pause frame enabled?
+ * @ports.tx_pause: is TX pause frame enabled?
* @ports.override_changed: is media reconfig pending?
*
* @ports.port_type: one of %PORT_* defines for ethtool
@@ -227,6 +229,8 @@ struct nfp_eth_table {
bool tx_enabled;
bool rx_enabled;
bool supp_aneg;
+ bool rx_pause;
+ bool tx_pause;
bool override_changed;
@@ -255,6 +259,8 @@ int
nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);
int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state);
+int nfp_eth_set_pauseparam(struct nfp_cpp *cpp, unsigned int idx,
+ unsigned int tx_pause, unsigned int rx_pause);
static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index 9d62085d772a..5cfddc9a5d87 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -42,6 +42,8 @@
#define NSP_ETH_STATE_ANEG GENMASK_ULL(25, 23)
#define NSP_ETH_STATE_FEC GENMASK_ULL(27, 26)
#define NSP_ETH_STATE_ACT_FEC GENMASK_ULL(29, 28)
+#define NSP_ETH_STATE_TX_PAUSE BIT_ULL(31)
+#define NSP_ETH_STATE_RX_PAUSE BIT_ULL(32)
#define NSP_ETH_CTRL_CONFIGURED BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED BIT_ULL(1)
@@ -52,6 +54,8 @@
#define NSP_ETH_CTRL_SET_ANEG BIT_ULL(6)
#define NSP_ETH_CTRL_SET_FEC BIT_ULL(7)
#define NSP_ETH_CTRL_SET_IDMODE BIT_ULL(8)
+#define NSP_ETH_CTRL_SET_TX_PAUSE BIT_ULL(10)
+#define NSP_ETH_CTRL_SET_RX_PAUSE BIT_ULL(11)
enum nfp_eth_raw {
NSP_ETH_RAW_PORT = 0,
@@ -180,6 +184,15 @@ nfp_eth_port_translate(struct nfp_nsp *nsp, const union eth_table_entry *src,
dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state);
dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 37) {
+ dst->tx_pause = true;
+ dst->rx_pause = true;
+ return;
+ }
+
+ dst->tx_pause = FIELD_GET(NSP_ETH_STATE_TX_PAUSE, state);
+ dst->rx_pause = FIELD_GET(NSP_ETH_STATE_RX_PAUSE, state);
}
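The NSP reports the pause state in bits 31 (TX) and 32 (RX) of the 64-bit port state word; for ABI minor versions below 37 those bits do not exist, so the code above falls back to reporting pause as enabled. A tiny standalone sketch of the extraction, with mask values mirroring the defines above:

#include <stdint.h>
#include <stdio.h>

#define NSP_ETH_STATE_TX_PAUSE  (1ULL << 31)
#define NSP_ETH_STATE_RX_PAUSE  (1ULL << 32)

int main(void)
{
        uint64_t state = NSP_ETH_STATE_RX_PAUSE;  /* example: RX pause on, TX pause off */

        printf("tx_pause=%d rx_pause=%d\n",
               !!(state & NSP_ETH_STATE_TX_PAUSE),
               !!(state & NSP_ETH_STATE_RX_PAUSE));
        return 0;
}
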
static void
@@ -497,7 +510,7 @@ int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed)
static int
nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx,
const u64 mask, const unsigned int shift,
- unsigned int val, const u64 ctrl_bit)
+ u64 val, const u64 ctrl_bit)
{
union eth_table_entry *entries = nfp_nsp_config_entries(nsp);
unsigned int idx = nfp_nsp_config_idx(nsp);
@@ -630,6 +643,81 @@ nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode)
}
/**
+ * __nfp_eth_set_txpause() - set tx pause control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @tx_pause: TX pause switch
+ *
+ * Set TX pause switch.
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int __nfp_eth_set_txpause(struct nfp_nsp *nsp, unsigned int tx_pause)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_TX_PAUSE,
+ tx_pause, NSP_ETH_CTRL_SET_TX_PAUSE);
+}
+
+/**
+ * __nfp_eth_set_rxpause() - set rx pause control bit
+ * @nsp: NFP NSP handle returned from nfp_eth_config_start()
+ * @rx_pause: RX pause switch
+ *
+ * Set RX pause switch.
+ *
+ * Return: 0 or -ERRNO.
+ */
+static int __nfp_eth_set_rxpause(struct nfp_nsp *nsp, unsigned int rx_pause)
+{
+ return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, NSP_ETH_STATE_RX_PAUSE,
+ rx_pause, NSP_ETH_CTRL_SET_RX_PAUSE);
+}
+
+/**
+ * nfp_eth_set_pauseparam() - Set TX/RX pause switch.
+ * @cpp: NFP CPP handle
+ * @idx: NFP chip-wide port index
+ * @tx_pause: TX pause switch
+ * @rx_pause: RX pause switch
+ *
+ * Return:
+ * 0 - configuration successful;
+ * 1 - no changes were needed;
+ * -ERRNO - configuration failed.
+ */
+int
+nfp_eth_set_pauseparam(struct nfp_cpp *cpp, unsigned int idx,
+ unsigned int tx_pause, unsigned int rx_pause)
+{
+ struct nfp_nsp *nsp;
+ int err;
+
+ nsp = nfp_eth_config_start(cpp, idx);
+ if (IS_ERR(nsp))
+ return PTR_ERR(nsp);
+
+ if (nfp_nsp_get_abi_ver_minor(nsp) < 37) {
+ nfp_err(nfp_nsp_cpp(nsp),
+ "set pause parameter operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
+ return -EOPNOTSUPP;
+ }
+
+ err = __nfp_eth_set_txpause(nsp, tx_pause);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ err = __nfp_eth_set_rxpause(nsp, rx_pause);
+ if (err) {
+ nfp_eth_config_cleanup_end(nsp);
+ return err;
+ }
+
+ return nfp_eth_config_commit_end(nsp);
+}
+
+/**
* __nfp_eth_set_speed() - set interface speed/rate
* @nsp: NFP NSP handle returned from nfp_eth_config_start()
* @speed: Desired speed (per lane)