-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h               19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c           55
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h            5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c        39
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h    77
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c 1303
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h  232
7 files changed, 1721 insertions(+), 9 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index dea422c53738..591ee2ee4bf6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -51,6 +51,7 @@
#define HNAE3_KNIC_CLIENT_INITED_B 0x3
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
+#define HNAE3_DEV_SUPPORT_FD_B 0x6
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -61,6 +62,9 @@
#define hnae3_dev_dcb_supported(hdev) \
hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+#define hnae3_dev_fd_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -175,6 +179,7 @@ struct hnae3_ae_dev {
struct list_head node;
u32 flag;
enum hnae3_dev_type dev_type;
+ enum hnae3_reset_type reset_type;
void *priv;
};
@@ -412,6 +417,20 @@ struct hnae3_ae_ops {
void (*get_link_mode)(struct hnae3_handle *handle,
unsigned long *supported,
unsigned long *advertising);
+ int (*add_fd_entry)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*del_fd_entry)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ void (*del_all_fd_entries)(struct hnae3_handle *handle,
+ bool clear_list);
+ int (*get_fd_rule_cnt)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_rule_info)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_all_rules)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs);
+ int (*restore_fd_rules)(struct hnae3_handle *handle);
+ void (*enable_fd)(struct hnae3_handle *handle, bool enable);
};
struct hnae3_dcb_ops {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 1050652274a0..c2692563a4d9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1285,6 +1285,13 @@ static int hns3_nic_set_features(struct net_device *netdev,
return ret;
}
+ if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
+ if (features & NETIF_F_NTUPLE)
+ h->ae_algo->ops->enable_fd(h, true);
+ else
+ h->ae_algo->ops->enable_fd(h, false);
+ }
+
netdev->features = features;
return 0;
}
@@ -1622,6 +1629,13 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
}
+static void hns3_get_dev_capability(struct pci_dev *pdev,
+ struct hnae3_ae_dev *ae_dev)
+{
+ if (pdev->revision >= 0x21)
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+}
+
/* hns3_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl
@@ -1647,6 +1661,8 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
ae_dev->dev_type = HNAE3_DEV_KNIC;
+ ae_dev->reset_type = HNAE3_NONE_RESET;
+ hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
hnae3_register_ae_dev(ae_dev);
@@ -1761,8 +1777,14 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
- if (pdev->revision != 0x20)
+ if (pdev->revision >= 0x21) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (!(h->flags & HNAE3_SUPPORT_VF)) {
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->features |= NETIF_F_NTUPLE;
+ }
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -3142,6 +3164,25 @@ static void hns3_uninit_mac_addr(struct net_device *netdev)
h->ae_algo->ops->rm_uc_addr(h, netdev->dev_addr);
}
+static int hns3_restore_fd_rules(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = 0;
+
+ if (h->ae_algo->ops->restore_fd_rules)
+ ret = h->ae_algo->ops->restore_fd_rules(h);
+
+ return ret;
+}
+
+static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->del_all_fd_entries)
+ h->ae_algo->ops->del_all_fd_entries(h, clear_list);
+}
+
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -3258,6 +3299,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ hns3_del_all_fd_rules(netdev, true);
+
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
@@ -3553,6 +3596,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (!(handle->flags & HNAE3_SUPPORT_VF))
hns3_restore_vlan(netdev);
+ hns3_restore_fd_rules(netdev);
+
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -3573,6 +3618,7 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
@@ -3593,6 +3639,13 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_uninit_mac_addr(netdev);
+ /* it is cumbersome for hardware to pick-and-choose rules for deletion
+ * from TCAM. Hence, for function reset software intervention is
+ * required to delete the rules
+ */
+ if (hns3_dev_ongoing_func_reset(ae_dev))
+ hns3_del_all_fd_rules(netdev, false);
+
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 27d704f4b306..ac881e8fc05d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -585,6 +585,11 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
writel(value, reg_addr + reg);
}
+static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
+{
+ return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+}
+
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 8ad2c3eeb46b..7d79a074a214 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -699,20 +699,33 @@ static int hns3_get_rxnfc(struct net_device *netdev,
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss_tuple)
+ if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = h->kinfo.rss_size;
- break;
+ cmd->data = h->kinfo.num_tqps;
+ return 0;
case ETHTOOL_GRXFH:
- return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (h->ae_algo->ops->get_fd_rule_cnt)
+ return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRULE:
+ if (h->ae_algo->ops->get_fd_rule_info)
+ return h->ae_algo->ops->get_fd_rule_info(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLALL:
+ if (h->ae_algo->ops->get_fd_all_rules)
+ return h->ae_algo->ops->get_fd_all_rules(h, cmd,
+ rule_locs);
+ return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
-
- return 0;
}
static int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
@@ -795,12 +808,22 @@ static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
+ if (!h->ae_algo || !h->ae_algo->ops)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_SRXFH:
- return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLINS:
+ if (h->ae_algo->ops->add_fd_entry)
+ return h->ae_algo->ops->add_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (h->ae_algo->ops->del_fd_entry)
+ return h->ae_algo->ops->del_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
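
The hunks above route the ethtool ntuple commands (ETHTOOL_SRXCLSRLINS/SRXCLSRLDEL on the set side, ETHTOOL_GRXCLSRLCNT/GRXCLSRULE/GRXCLSRLALL on the get side) to the new flow-director hooks when the backend provides them. The sketch below is not part of the patch; it shows how a rule insertion could be driven from userspace through the SIOCETHTOOL ioctl. The interface name, address and queue index are placeholders, and in practice the same request is issued by "ethtool -N <dev> flow-type tcp4 ...". Mask bits set to 1 select the bits the driver compares (see hclge_fd_get_tuple() and calc_x/calc_y below). Toggling the feature itself ("ethtool -K <dev> ntuple on|off") ends up in hns3_nic_set_features() and the new enable_fd hook.

/* Minimal userspace sketch (assumption: eth0 exists and the driver exposes
 * NETIF_F_NTUPLE): insert one flow-director rule that steers TCP/IPv4
 * traffic for 192.0.2.10:80 to RX queue 3.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.10");
	nfc.fs.m_u.tcp_ip4_spec.ip4dst = inet_addr("255.255.255.255");
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);	/* all-ones: exact match */
	nfc.fs.ring_cookie = 3;	/* steer matching packets to RX queue 3 */
	nfc.fs.location = 0;	/* TCAM entry index, checked against rule_num */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRXCLSRLINS");

	close(fd);
	return 0;
}
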
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 842dc3f05247..e5e66b27e03e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -190,6 +190,13 @@ enum hclge_opcode_type {
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ /* Flow Director commands */
+ HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+ HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+ HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+ HCLGE_OPC_FD_TCAM_OP = 0x1203,
+ HCLGE_OPC_FD_AD_OP = 0x1204,
+
/* MDIO command */
HCLGE_OPC_MDIO_CONFIG = 0x1900,
@@ -819,6 +826,76 @@ struct hclge_set_led_state_cmd {
u8 rsv2[20];
};
+struct hclge_get_fd_mode_cmd {
+ u8 mode;
+ u8 enable;
+ u8 rsv[22];
+};
+
+struct hclge_get_fd_allocation_cmd {
+ __le32 stage1_entry_num;
+ __le32 stage2_entry_num;
+ __le16 stage1_counter_num;
+ __le16 stage2_counter_num;
+ u8 rsv[12];
+};
+
+struct hclge_set_fd_key_config_cmd {
+ u8 stage;
+ u8 key_select;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u8 rsv1[2];
+ __le32 tuple_mask;
+ __le32 meta_data_mask;
+ u8 rsv2[8];
+};
+
+#define HCLGE_FD_EPORT_SW_EN_B 0
+struct hclge_fd_tcam_config_1_cmd {
+ u8 stage;
+ u8 xy_sel;
+ u8 port_info;
+ u8 rsv1[1];
+ __le32 index;
+ u8 entry_vld;
+ u8 rsv2[7];
+ u8 tcam_data[8];
+};
+
+struct hclge_fd_tcam_config_2_cmd {
+ u8 tcam_data[24];
+};
+
+struct hclge_fd_tcam_config_3_cmd {
+ u8 tcam_data[20];
+ u8 rsv[4];
+};
+
+#define HCLGE_FD_AD_DROP_B 0
+#define HCLGE_FD_AD_DIRECT_QID_B 1
+#define HCLGE_FD_AD_QID_S 2
+#define HCLGE_FD_AD_QID_M GENMASK(12, 2)
+#define HCLGE_FD_AD_USE_COUNTER_B 12
+#define HCLGE_FD_AD_COUNTER_NUM_S 13
+#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HCLGE_FD_AD_NXT_STEP_B 20
+#define HCLGE_FD_AD_NXT_KEY_S 21
+#define HCLGE_FD_AD_NXT_KEY_M GENMASK(26, 21)
+#define HCLGE_FD_AD_WR_RULE_ID_B 0
+#define HCLGE_FD_AD_RULE_ID_S 1
+#define HCLGE_FD_AD_RULE_ID_M GENMASK(13, 1)
+
+struct hclge_fd_ad_config_cmd {
+ u8 stage;
+ u8 rsv1[3];
+ __le32 index;
+ __le64 ad_data;
+ u8 rsv2[8];
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
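
The TCAM write is split across three descriptors because a single 24-byte descriptor data area cannot hold a full key once the control fields (stage, xy_sel, index, entry_vld) are accounted for: the tcam_data fields above carry 8 + 24 + 20 = 52 bytes, which matches the dword-padded 400-bit key defined later in hclge_main.h. A standalone sketch of that arithmetic, not part of the patch, with DIV_ROUND_UP redefined for a userspace build:

/* Standalone check (not part of the patch): the three HCLGE_OPC_FD_TCAM_OP
 * descriptors together carry exactly one dword-padded 400-bit key.
 */
#include <assert.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* kernel helper, redefined here */

#define MAX_KEY_LENGTH		400	/* bits, from hclge_main.h below */
#define MAX_KEY_DWORDS		DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES		(MAX_KEY_DWORDS * 4)

static_assert(8 + 24 + 20 == MAX_KEY_BYTES,
	      "tcam_data of the three descriptors must cover one full key");
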
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c17ceeefa453..21ca4af3b37a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2513,8 +2513,13 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
static void hclge_reset(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hnae3_handle *handle;
+ /* Initialize ae_dev reset status as well, in case the enet layer wants
+ * to know if the device is undergoing reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
/* perform reset of the stack & ae device for a client */
handle = &hdev->vport[0].nic;
rtnl_lock();
@@ -2535,6 +2540,7 @@ static void hclge_reset(struct hclge_dev *hdev)
hclge_notify_client(hdev, HNAE3_UP_CLIENT);
handle->last_reset_time = jiffies;
rtnl_unlock();
+ ae_dev->reset_type = HNAE3_NONE_RESET;
}
static void hclge_reset_event(struct hnae3_handle *handle)
@@ -3328,6 +3334,1281 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
hclge_cmd_set_promisc_mode(hdev, &param);
}
+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
+{
+ struct hclge_get_fd_mode_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
+
+ req = (struct hclge_get_fd_mode_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ *fd_mode = req->mode;
+
+ return ret;
+}
+
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
+ u32 *stage1_entry_num,
+ u32 *stage2_entry_num,
+ u16 *stage1_counter_num,
+ u16 *stage2_counter_num)
+{
+ struct hclge_get_fd_allocation_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
+
+ req = (struct hclge_get_fd_allocation_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
+ *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
+ *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
+ *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
+
+ return ret;
+}
+
+static int hclge_set_fd_key_config(struct hclge_dev *hdev, int stage_num)
+{
+ struct hclge_set_fd_key_config_cmd *req;
+ struct hclge_fd_key_cfg *stage;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
+
+ req = (struct hclge_set_fd_key_config_cmd *)desc.data;
+ stage = &hdev->fd_cfg.key_cfg[stage_num];
+ req->stage = stage_num;
+ req->key_select = stage->key_sel;
+ req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
+ req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
+ req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
+ req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
+ req->tuple_mask = cpu_to_le32(~stage->tuple_active);
+ req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static int hclge_init_fd_config(struct hclge_dev *hdev)
+{
+#define LOW_2_WORDS 0x03
+ struct hclge_fd_key_cfg *key_cfg;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return 0;
+
+ ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
+ if (ret)
+ return ret;
+
+ switch (hdev->fd_cfg.fd_mode) {
+ case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
+ break;
+ case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "Unsupported flow director mode %d\n",
+ hdev->fd_cfg.fd_mode);
+ return -EOPNOTSUPP;
+ }
+
+ hdev->fd_cfg.fd_en = true;
+ hdev->fd_cfg.proto_support =
+ TCP_V4_FLOW | UDP_V4_FLOW | SCTP_V4_FLOW | TCP_V6_FLOW |
+ UDP_V6_FLOW | SCTP_V6_FLOW | IPV4_USER_FLOW | IPV6_USER_FLOW;
+ key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
+ key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
+ key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
+ key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
+ key_cfg->outer_sipv6_word_en = 0;
+ key_cfg->outer_dipv6_word_en = 0;
+
+ key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
+ BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ /* If the max 400-bit key is used, we can also support tuples for ether type */
+ if (hdev->fd_cfg.max_key_length == MAX_KEY_LENGTH) {
+ hdev->fd_cfg.proto_support |= ETHER_FLOW;
+ key_cfg->tuple_active |=
+ BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
+ }
+
+ /* roce_type is used to filter roce frames
+ * dst_vport is used to specify the rule
+ */
+ key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
+
+ ret = hclge_get_fd_allocation(hdev,
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
+ if (ret)
+ return ret;
+
+ return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
+}
+
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
+ int loc, u8 *key, bool is_add)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
+ req1->index = cpu_to_le32(loc);
+ req1->entry_vld = sel_x ? is_add : 0;
+
+ if (key) {
+ memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
+ memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
+ sizeof(req2->tcam_data));
+ memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
+ sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "config tcam key fail, ret=%d\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
+ struct hclge_fd_ad_data *action)
+{
+ struct hclge_fd_ad_config_cmd *req;
+ struct hclge_desc desc;
+ u64 ad_data = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
+
+ req = (struct hclge_fd_ad_config_cmd *)desc.data;
+ req->index = cpu_to_le32(loc);
+ req->stage = stage;
+
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
+ action->write_rule_id_to_bd);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
+ action->rule_id);
+ ad_data <<= 32;
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
+ action->forward_to_direct_queue);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+ action->queue_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
+ HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
+ action->counter_id);
+
+ req->ad_data = cpu_to_le64(ad_data);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u16 tmp_x_s, tmp_y_s;
+ u32 tmp_x_l, tmp_y_l;
+ int i;
+
+ if (rule->unused_tuple & tuple_bit)
+ return true;
+
+ switch (tuple_bit) {
+ case 0:
+ return false;
+ case BIT(INNER_DST_MAC):
+ for (i = 0; i < 6; i++) {
+ calc_x(key_x[5 - i], rule->tuples.dst_mac[i],
+ rule->tuples_mask.dst_mac[i]);
+ calc_y(key_y[5 - i], rule->tuples.dst_mac[i],
+ rule->tuples_mask.dst_mac[i]);
+ }
+
+ return true;
+ case BIT(INNER_SRC_MAC):
+ for (i = 0; i < 6; i++) {
+ calc_x(key_x[5 - i], rule->tuples.src_mac[i],
+ rule->tuples_mask.src_mac[i]);
+ calc_y(key_y[5 - i], rule->tuples.src_mac[i],
+ rule->tuples_mask.src_mac[i]);
+ }
+
+ return true;
+ case BIT(INNER_VLAN_TAG_FST):
+ calc_x(tmp_x_s, rule->tuples.vlan_tag1,
+ rule->tuples_mask.vlan_tag1);
+ calc_y(tmp_y_s, rule->tuples.vlan_tag1,
+ rule->tuples_mask.vlan_tag1);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_ETH_TYPE):
+ calc_x(tmp_x_s, rule->tuples.ether_proto,
+ rule->tuples_mask.ether_proto);
+ calc_y(tmp_y_s, rule->tuples.ether_proto,
+ rule->tuples_mask.ether_proto);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_IP_TOS):
+ calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+ calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+
+ return true;
+ case BIT(INNER_IP_PROTO):
+ calc_x(*key_x, rule->tuples.ip_proto,
+ rule->tuples_mask.ip_proto);
+ calc_y(*key_y, rule->tuples.ip_proto,
+ rule->tuples_mask.ip_proto);
+
+ return true;
+ case BIT(INNER_SRC_IP):
+ calc_x(tmp_x_l, rule->tuples.src_ip[3],
+ rule->tuples_mask.src_ip[3]);
+ calc_y(tmp_y_l, rule->tuples.src_ip[3],
+ rule->tuples_mask.src_ip[3]);
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case BIT(INNER_DST_IP):
+ calc_x(tmp_x_l, rule->tuples.dst_ip[3],
+ rule->tuples_mask.dst_ip[3]);
+ calc_y(tmp_y_l, rule->tuples.dst_ip[3],
+ rule->tuples_mask.dst_ip[3]);
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case BIT(INNER_SRC_PORT):
+ calc_x(tmp_x_s, rule->tuples.src_port,
+ rule->tuples_mask.src_port);
+ calc_y(tmp_y_s, rule->tuples.src_port,
+ rule->tuples_mask.src_port);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_DST_PORT):
+ calc_x(tmp_x_s, rule->tuples.dst_port,
+ rule->tuples_mask.dst_port);
+ calc_y(tmp_y_s, rule->tuples.dst_port,
+ rule->tuples_mask.dst_port);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
+ u8 vf_id, u8 network_port_id)
+{
+ u32 port_number = 0;
+
+ if (port_type == HOST_PORT) {
+ hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
+ pf_id);
+ hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
+ vf_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
+ } else {
+ hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
+ HCLGE_NETWORK_PORT_ID_S, network_port_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
+ }
+
+ return port_number;
+}
+
+static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
+ __le32 *key_x, __le32 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
+ u8 cur_pos = 0, tuple_size, shift_bits;
+ int i;
+
+ for (i = 0; i < MAX_META_DATA; i++) {
+ tuple_size = meta_data_key_info[i].key_length;
+ tuple_bit = key_cfg->meta_data_active & BIT(i);
+
+ switch (tuple_bit) {
+ case BIT(ROCE_TYPE):
+ hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
+ cur_pos += tuple_size;
+ break;
+ case BIT(DST_VPORT):
+ port_number = hclge_get_port_number(HOST_PORT, 0,
+ rule->vf_id, 0);
+ hnae3_set_field(meta_data,
+ GENMASK(cur_pos + tuple_size, cur_pos),
+ cur_pos, port_number);
+ cur_pos += tuple_size;
+ break;
+ default:
+ break;
+ }
+ }
+
+ calc_x(tmp_x, meta_data, 0xFFFFFFFF);
+ calc_y(tmp_y, meta_data, 0xFFFFFFFF);
+ shift_bits = sizeof(meta_data) * 8 - cur_pos;
+
+ *key_x = cpu_to_le32(tmp_x << shift_bits);
+ *key_y = cpu_to_le32(tmp_y << shift_bits);
+}
+
+/* A complete key is made up of a meta data key and a tuple key.
+ * The meta data key is stored in the MSB region, the tuple key in the
+ * LSB region, and unused bits are filled with 0.
+ */
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
+ u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
+ u8 *cur_key_x, *cur_key_y;
+ int i, ret, tuple_size;
+ u8 meta_data_region;
+
+ memset(key_x, 0, sizeof(key_x));
+ memset(key_y, 0, sizeof(key_y));
+ cur_key_x = key_x;
+ cur_key_y = key_y;
+
+ for (i = 0; i < MAX_TUPLE; i++) {
+ bool tuple_valid;
+ u32 check_tuple;
+
+ tuple_size = tuple_key_info[i].key_length / 8;
+ check_tuple = key_cfg->tuple_active & BIT(i);
+
+ tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+ cur_key_y, rule);
+ if (tuple_valid) {
+ cur_key_x += tuple_size;
+ cur_key_y += tuple_size;
+ }
+ }
+
+ meta_data_region = hdev->fd_cfg.max_key_length / 8 -
+ MAX_META_DATA_LENGTH / 8;
+
+ hclge_fd_convert_meta_data(key_cfg,
+ (__le32 *)(key_x + meta_data_region),
+ (__le32 *)(key_y + meta_data_region),
+ rule);
+
+ ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
+ true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "fd key_y config fail, loc=%d, ret=%d\n",
+ rule->location, ret);
+ return ret;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
+ true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "fd key_x config fail, loc=%d, ret=%d\n",
+ rule->location, ret);
+ return ret;
+}
+
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_ad_data ad_data;
+
+ ad_data.ad_id = rule->location;
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ ad_data.drop_packet = true;
+ ad_data.forward_to_direct_queue = false;
+ ad_data.queue_id = 0;
+ } else {
+ ad_data.drop_packet = false;
+ ad_data.forward_to_direct_queue = true;
+ ad_data.queue_id = rule->queue_id;
+ }
+
+ ad_data.use_counter = false;
+ ad_data.counter_id = 0;
+
+ ad_data.use_next_stage = false;
+ ad_data.next_input_key = 0;
+
+ ad_data.write_rule_id_to_bd = true;
+ ad_data.rule_id = rule->location;
+
+ return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
+}
+
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs, u32 *unused)
+{
+ struct ethtool_tcpip4_spec *tcp_ip4_spec;
+ struct ethtool_usrip4_spec *usr_ip4_spec;
+ struct ethtool_tcpip6_spec *tcp_ip6_spec;
+ struct ethtool_usrip6_spec *usr_ip6_spec;
+ struct ethhdr *ether_spec;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return -EINVAL;
+
+ if (!(fs->flow_type & hdev->fd_cfg.proto_support))
+ return -EOPNOTSUPP;
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ tcp_ip4_spec = &fs->h_u.tcp_ip4_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+
+ if (!tcp_ip4_spec->ip4src)
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!tcp_ip4_spec->ip4dst)
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!tcp_ip4_spec->psrc)
+ *unused |= BIT(INNER_SRC_PORT);
+
+ if (!tcp_ip4_spec->pdst)
+ *unused |= BIT(INNER_DST_PORT);
+
+ if (!tcp_ip4_spec->tos)
+ *unused |= BIT(INNER_IP_TOS);
+
+ break;
+ case IP_USER_FLOW:
+ usr_ip4_spec = &fs->h_u.usr_ip4_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ if (!usr_ip4_spec->ip4src)
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!usr_ip4_spec->ip4dst)
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!usr_ip4_spec->tos)
+ *unused |= BIT(INNER_IP_TOS);
+
+ if (!usr_ip4_spec->proto)
+ *unused |= BIT(INNER_IP_PROTO);
+
+ if (usr_ip4_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ if (usr_ip4_spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ tcp_ip6_spec = &fs->h_u.tcp_ip6_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS);
+
+ if (!tcp_ip6_spec->ip6src[0] && !tcp_ip6_spec->ip6src[1] &&
+ !tcp_ip6_spec->ip6src[2] && !tcp_ip6_spec->ip6src[3])
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!tcp_ip6_spec->ip6dst[0] && !tcp_ip6_spec->ip6dst[1] &&
+ !tcp_ip6_spec->ip6dst[2] && !tcp_ip6_spec->ip6dst[3])
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!tcp_ip6_spec->psrc)
+ *unused |= BIT(INNER_SRC_PORT);
+
+ if (!tcp_ip6_spec->pdst)
+ *unused |= BIT(INNER_DST_PORT);
+
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ break;
+ case IPV6_USER_FLOW:
+ usr_ip6_spec = &fs->h_u.usr_ip6_spec;
+ *unused |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) |
+ BIT(INNER_DST_PORT);
+
+ if (!usr_ip6_spec->ip6src[0] && !usr_ip6_spec->ip6src[1] &&
+ !usr_ip6_spec->ip6src[2] && !usr_ip6_spec->ip6src[3])
+ *unused |= BIT(INNER_SRC_IP);
+
+ if (!usr_ip6_spec->ip6dst[0] && !usr_ip6_spec->ip6dst[1] &&
+ !usr_ip6_spec->ip6dst[2] && !usr_ip6_spec->ip6dst[3])
+ *unused |= BIT(INNER_DST_IP);
+
+ if (!usr_ip6_spec->l4_proto)
+ *unused |= BIT(INNER_IP_PROTO);
+
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ if (usr_ip6_spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ break;
+ case ETHER_FLOW:
+ ether_spec = &fs->h_u.ether_spec;
+ *unused |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(ether_spec->h_source))
+ *unused |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(ether_spec->h_dest))
+ *unused |= BIT(INNER_DST_MAC);
+
+ if (!ether_spec->h_proto)
+ *unused |= BIT(INNER_ETH_TYPE);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->h_ext.vlan_etype)
+ return -EOPNOTSUPP;
+ if (!fs->h_ext.vlan_tci)
+ *unused |= BIT(INNER_VLAN_TAG_FST);
+
+ if (fs->m_ext.vlan_tci) {
+ if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+ }
+ } else {
+ *unused |= BIT(INNER_VLAN_TAG_FST);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ if (!(hdev->fd_cfg.proto_support & ETHER_FLOW))
+ return -EOPNOTSUPP;
+
+ if (is_zero_ether_addr(fs->h_ext.h_dest))
+ *unused |= BIT(INNER_DST_MAC);
+ else
+ *unused &= ~(BIT(INNER_DST_MAC));
+ }
+
+ return 0;
+}
+
+static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= location)
+ break;
+ }
+
+ return rule && rule->location == location;
+}
+
+static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
+ struct hclge_fd_rule *new_rule,
+ u16 location,
+ bool is_add)
+{
+ struct hclge_fd_rule *rule = NULL, *parent = NULL;
+ struct hlist_node *node2;
+
+ if (is_add && !new_rule)
+ return -EINVAL;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= location)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->location == location) {
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+
+ if (!is_add)
+ return 0;
+
+ } else if (!is_add) {
+ dev_err(&hdev->pdev->dev,
+ "delete fail, rule %d is inexistent\n",
+ location);
+ return -EINVAL;
+ }
+
+ INIT_HLIST_NODE(&new_rule->rule_node);
+
+ if (parent)
+ hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
+ else
+ hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+
+ hdev->hclge_fd_rule_num++;
+
+ return 0;
+}
+
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ rule->tuples.src_ip[3] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[3] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[3] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[3] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+ rule->tuples_mask.src_port =
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+ rule->tuples_mask.dst_port =
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+
+ rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case IP_USER_FLOW:
+ rule->tuples.src_ip[3] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[3] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[3] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[3] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+
+ rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ be32_to_cpu_array(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src, 4);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src, 4);
+
+ be32_to_cpu_array(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst, 4);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst, 4);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+ rule->tuples_mask.src_port =
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+ rule->tuples_mask.dst_port =
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case IPV6_USER_FLOW:
+ be32_to_cpu_array(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src, 4);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src, 4);
+
+ be32_to_cpu_array(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst, 4);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst, 4);
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case ETHER_FLOW:
+ ether_addr_copy(rule->tuples.src_mac,
+ fs->h_u.ether_spec.h_source);
+ ether_addr_copy(rule->tuples_mask.src_mac,
+ fs->m_u.ether_spec.h_source);
+
+ ether_addr_copy(rule->tuples.dst_mac,
+ fs->h_u.ether_spec.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac,
+ fs->m_u.ether_spec.h_dest);
+
+ rule->tuples.ether_proto =
+ be16_to_cpu(fs->h_u.ether_spec.h_proto);
+ rule->tuples_mask.ether_proto =
+ be16_to_cpu(fs->m_u.ether_spec.h_proto);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_SCTP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_TCP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_UDP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ default:
+ break;
+ }
+
+ if ((fs->flow_type & FLOW_EXT)) {
+ rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
+ rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
+ }
+
+ return 0;
+}
+
+static int hclge_add_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 dst_vport_id = 0, q_index = 0;
+ struct ethtool_rx_flow_spec *fs;
+ struct hclge_fd_rule *rule;
+ u32 unused = 0;
+ u8 action;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ if (!hdev->fd_cfg.fd_en) {
+ dev_warn(&hdev->pdev->dev,
+ "Please enable flow director first\n");
+ return -EOPNOTSUPP;
+ }
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ ret = hclge_fd_check_spec(hdev, fs, &unused);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "Check fd spec failed\n");
+ return ret;
+ }
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ action = HCLGE_FD_ACTION_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ u16 tqps;
+
+ /* validate vf before using it to index hdev->vport[] */
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+ "Error: vf id (%d) > max vf num (%d)\n",
+ vf, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
+ dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+ tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
+
+ if (ring >= tqps) {
+ dev_err(&hdev->pdev->dev,
+ "Error: queue id (%d) > max tqp num (%d)\n",
+ ring, tqps - 1);
+ return -EINVAL;
+ }
+
+ action = HCLGE_FD_ACTION_ACCEPT_PACKET;
+ q_index = ring;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = hclge_fd_get_tuple(hdev, fs, rule);
+ if (ret)
+ goto free_rule;
+
+ rule->flow_type = fs->flow_type;
+
+ rule->location = fs->location;
+ rule->unused_tuple = unused;
+ rule->vf_id = dst_vport_id;
+ rule->queue_id = q_index;
+ rule->action = action;
+
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto free_rule;
+
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto free_rule;
+
+ ret = hclge_fd_update_rule_list(hdev, rule, fs->location, true);
+ if (ret)
+ goto free_rule;
+
+ return ret;
+
+free_rule:
+ kfree(rule);
+ return ret;
+}
+
+static int hclge_del_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return -EINVAL;
+
+ if (!hclge_fd_rule_exist(hdev, fs->location)) {
+ dev_err(&hdev->pdev->dev,
+ "Delete fail, rule %d is inexistent\n",
+ fs->location);
+ return -ENOENT;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ fs->location, NULL, false);
+ if (ret)
+ return ret;
+
+ return hclge_fd_update_rule_list(hdev, NULL, fs->location,
+ false);
+}
+
+static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+ bool clear_list)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return;
+
+ if (clear_list) {
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+ rule_node) {
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+ }
+ } else {
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+ rule_node)
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ }
+}
+
+static int hclge_restore_fd_entries(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (!ret)
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+
+ if (ret) {
+ dev_warn(&hdev->pdev->dev,
+ "Restore rule %d failed, remove it\n",
+ rule->location);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+ }
+ }
+ return 0;
+}
+
+static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ cmd->rule_cnt = hdev->hclge_fd_rule_num;
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ return 0;
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule *rule = NULL;
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ struct hlist_node *node2;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= fs->location)
+ break;
+ }
+
+ if (!rule || fs->location != rule->location)
+ return -ENOENT;
+
+ fs->flow_type = rule->flow_type;
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs->h_u.tcp_ip4_spec.ip4src =
+ cpu_to_be32(rule->tuples.src_ip[3]);
+ fs->m_u.tcp_ip4_spec.ip4src =
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+
+ fs->h_u.tcp_ip4_spec.ip4dst =
+ cpu_to_be32(rule->tuples.dst_ip[3]);
+ fs->m_u.tcp_ip4_spec.ip4dst =
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+
+ fs->h_u.tcp_ip4_spec.psrc = cpu_to_be16(rule->tuples.src_port);
+ fs->m_u.tcp_ip4_spec.psrc =
+ rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ fs->h_u.tcp_ip4_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
+ fs->m_u.tcp_ip4_spec.pdst =
+ rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ fs->h_u.tcp_ip4_spec.tos = rule->tuples.ip_tos;
+ fs->m_u.tcp_ip4_spec.tos =
+ rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ break;
+ case IP_USER_FLOW:
+ fs->h_u.usr_ip4_spec.ip4src =
+ cpu_to_be32(rule->tuples.src_ip[3]);
+ fs->m_u.usr_ip4_spec.ip4src =
+ rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[3]);
+
+ fs->h_u.usr_ip4_spec.ip4dst =
+ cpu_to_be32(rule->tuples.dst_ip[3]);
+ fs->m_u.usr_ip4_spec.ip4dst =
+ rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[3]);
+
+ fs->h_u.usr_ip4_spec.tos = rule->tuples.ip_tos;
+ fs->m_u.usr_ip4_spec.tos =
+ rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ fs->h_u.usr_ip4_spec.proto = rule->tuples.ip_proto;
+ fs->m_u.usr_ip4_spec.proto =
+ rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ fs->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6src,
+ rule->tuples.src_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(fs->m_u.tcp_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6src,
+ rule->tuples_mask.src_ip, 4);
+
+ cpu_to_be32_array(fs->h_u.tcp_ip6_spec.ip6dst,
+ rule->tuples.dst_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(fs->m_u.tcp_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.tcp_ip6_spec.ip6dst,
+ rule->tuples_mask.dst_ip, 4);
+
+ fs->h_u.tcp_ip6_spec.psrc = cpu_to_be16(rule->tuples.src_port);
+ fs->m_u.tcp_ip6_spec.psrc =
+ rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ fs->h_u.tcp_ip6_spec.pdst = cpu_to_be16(rule->tuples.dst_port);
+ fs->m_u.tcp_ip6_spec.pdst =
+ rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ break;
+ case IPV6_USER_FLOW:
+ cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6src,
+ rule->tuples.src_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(fs->m_u.usr_ip6_spec.ip6src, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6src,
+ rule->tuples_mask.src_ip, 4);
+
+ cpu_to_be32_array(fs->h_u.usr_ip6_spec.ip6dst,
+ rule->tuples.dst_ip, 4);
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(fs->m_u.usr_ip6_spec.ip6dst, 0, sizeof(int) * 4);
+ else
+ cpu_to_be32_array(fs->m_u.usr_ip6_spec.ip6dst,
+ rule->tuples_mask.dst_ip, 4);
+
+ fs->h_u.usr_ip6_spec.l4_proto = rule->tuples.ip_proto;
+ fs->m_u.usr_ip6_spec.l4_proto =
+ rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ break;
+ case ETHER_FLOW:
+ ether_addr_copy(fs->h_u.ether_spec.h_source,
+ rule->tuples.src_mac);
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_source);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_source,
+ rule->tuples_mask.src_mac);
+
+ ether_addr_copy(fs->h_u.ether_spec.h_dest,
+ rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
+ rule->tuples_mask.dst_mac);
+
+ fs->h_u.ether_spec.h_proto =
+ cpu_to_be16(rule->tuples.ether_proto);
+ fs->m_u.ether_spec.h_proto =
+ rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (fs->flow_type & FLOW_EXT) {
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ fs->m_ext.vlan_tci =
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+ cpu_to_be16(VLAN_VID_MASK) :
+ cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_ext.h_dest);
+ else
+ ether_addr_copy(fs->m_ext.h_dest,
+ rule->tuples_mask.dst_mac);
+ }
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+
+ return 0;
+}
+
+static int hclge_get_all_rules(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hdev->fd_cfg.fd_en = enable;
+ if (!enable)
+ hclge_del_all_fd_entries(handle, false);
+ else
+ hclge_restore_fd_entries(handle);
+}
+
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
struct hclge_desc desc;
@@ -5502,6 +6783,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fd table init fail, ret=%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -5608,6 +6896,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fd table init fail, ret=%d\n", ret);
+ return ret;
+ }
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -6047,6 +7342,14 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_regs = hclge_get_regs,
.set_led_id = hclge_set_led_id,
.get_link_mode = hclge_get_link_mode,
+ .add_fd_entry = hclge_add_fd_entry,
+ .del_fd_entry = hclge_del_fd_entry,
+ .del_all_fd_entries = hclge_del_all_fd_entries,
+ .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
+ .get_fd_rule_info = hclge_get_fd_rule_info,
+ .get_fd_all_rules = hclge_get_all_rules,
+ .restore_fd_rules = hclge_restore_fd_entries,
+ .enable_fd = hclge_enable_fd,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7841b830a716..06adbdd27b95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -79,6 +79,19 @@
#define HCLGE_VF_NUM_PER_CMD 64
#define HCLGE_VF_NUM_PER_BYTE 8
+enum HLCGE_PORT_TYPE {
+ HOST_PORT,
+ NETWORK_PORT
+};
+
+#define HCLGE_PF_ID_S 0
+#define HCLGE_PF_ID_M GENMASK(2, 0)
+#define HCLGE_VF_ID_S 3
+#define HCLGE_VF_ID_M GENMASK(10, 3)
+#define HCLGE_PORT_TYPE_B 11
+#define HCLGE_NETWORK_PORT_ID_S 0
+#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
+
/* Reset related Registers */
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
@@ -359,6 +372,221 @@ struct hclge_vlan_type_cfg {
u16 tx_in_vlan_type;
};
+enum HCLGE_FD_MODE {
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
+ HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
+};
+
+enum HCLGE_FD_KEY_TYPE {
+ HCLGE_FD_KEY_BASE_ON_PTYPE,
+ HCLGE_FD_KEY_BASE_ON_TUPLE,
+};
+
+enum HCLGE_FD_STAGE {
+ HCLGE_FD_STAGE_1,
+ HCLGE_FD_STAGE_2,
+};
+
+/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet
+ * INNER_XXX indicates tuples in the tunneled (inner) header of a tunnel
+ * packet, or the tuples of a non-tunnel packet
+ */
+enum HCLGE_FD_TUPLE {
+ OUTER_DST_MAC,
+ OUTER_SRC_MAC,
+ OUTER_VLAN_TAG_FST,
+ OUTER_VLAN_TAG_SEC,
+ OUTER_ETH_TYPE,
+ OUTER_L2_RSV,
+ OUTER_IP_TOS,
+ OUTER_IP_PROTO,
+ OUTER_SRC_IP,
+ OUTER_DST_IP,
+ OUTER_L3_RSV,
+ OUTER_SRC_PORT,
+ OUTER_DST_PORT,
+ OUTER_L4_RSV,
+ OUTER_TUN_VNI,
+ OUTER_TUN_FLOW_ID,
+ INNER_DST_MAC,
+ INNER_SRC_MAC,
+ INNER_VLAN_TAG_FST,
+ INNER_VLAN_TAG_SEC,
+ INNER_ETH_TYPE,
+ INNER_L2_RSV,
+ INNER_IP_TOS,
+ INNER_IP_PROTO,
+ INNER_SRC_IP,
+ INNER_DST_IP,
+ INNER_L3_RSV,
+ INNER_SRC_PORT,
+ INNER_DST_PORT,
+ INNER_L4_RSV,
+ MAX_TUPLE,
+};
+
+enum HCLGE_FD_META_DATA {
+ PACKET_TYPE_ID,
+ IP_FRAGEMENT,
+ ROCE_TYPE,
+ NEXT_KEY,
+ VLAN_NUMBER,
+ SRC_VPORT,
+ DST_VPORT,
+ TUNNEL_PACKET,
+ MAX_META_DATA,
+};
+
+struct key_info {
+ u8 key_type;
+ u8 key_length;
+};
+
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
+#define MAX_KEY_LENGTH 400
+#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
+#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
+#define MAX_META_DATA_LENGTH 32
+
+enum HCLGE_FD_PACKET_TYPE {
+ NIC_PACKET,
+ ROCE_PACKET,
+};
+
+enum HCLGE_FD_ACTION {
+ HCLGE_FD_ACTION_ACCEPT_PACKET,
+ HCLGE_FD_ACTION_DROP_PACKET,
+};
+
+struct hclge_fd_key_cfg {
+ u8 key_sel;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u32 tuple_active;
+ u32 meta_data_active;
+};
+
+struct hclge_fd_cfg {
+ u8 fd_mode;
+ u8 fd_en;
+ u16 max_key_length;
+ u32 proto_support;
+ u32 rule_num[2]; /* rule entry number */
+ u16 cnt_num[2]; /* rule hit counter number */
+ struct hclge_fd_key_cfg key_cfg[2];
+};
+
+struct hclge_fd_rule_tuples {
+ u8 src_mac[6];
+ u8 dst_mac[6];
+ u32 src_ip[4];
+ u32 dst_ip[4];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_tag1;
+ u16 ether_proto;
+ u8 ip_tos;
+ u8 ip_proto;
+};
+
+struct hclge_fd_rule {
+ struct hlist_node rule_node;
+ struct hclge_fd_rule_tuples tuples;
+ struct hclge_fd_rule_tuples tuples_mask;
+ u32 unused_tuple;
+ u32 flow_type;
+ u8 action;
+ u16 vf_id;
+ u16 queue_id;
+ u16 location;
+};
+
+struct hclge_fd_ad_data {
+ u16 ad_id;
+ u8 drop_packet;
+ u8 forward_to_direct_queue;
+ u16 queue_id;
+ u8 use_counter;
+ u8 counter_id;
+ u8 use_next_stage;
+ u8 write_rule_id_to_bd;
+ u8 next_input_key;
+ u16 rule_id;
+};
+
+/* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+ * | bit x | bit y | search value |
+ * ----------------------------------
+ * | 0 | 0 | always hit |
+ * ----------------------------------
+ * | 1 | 0 | match '0' |
+ * ----------------------------------
+ * | 0 | 1 | match '1' |
+ * ----------------------------------
+ * | 1 | 1 | invalid |
+ * ----------------------------------
+ * Then for input key(k) and mask(v), we can calculate the value by
+ * the formulae:
+ * x = (~k) & v
+ * y = (k ^ ~v) & k
+ */
+#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_y(y, k, v) \
+ do { \
+ const typeof(k) _k_ = (k); \
+ const typeof(v) _v_ = (v); \
+ (y) = (_k_ ^ ~_v_) & (_k_); \
+ } while (0)
+
#define HCLGE_VPORT_NUM 256
struct hclge_dev {
struct pci_dev *pdev;
@@ -448,6 +676,10 @@ struct hclge_dev {
struct hclge_vlan_type_cfg vlan_type_cfg;
unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
+ struct hclge_fd_cfg fd_cfg;
+ struct hlist_head fd_rule_list;
+ u16 hclge_fd_rule_num;
};
/* VPort level vlan tag configuration for TX direction */
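
To make the x/y encoding documented in the hclge_main.h comment above concrete, here is a small standalone example (not part of the patch) that mirrors calc_x()/calc_y() and evaluates them for one byte of key and mask. With key 0xA5 and mask 0xF0 it prints x=0x50 y=0xa0: the four masked-in bits become match '1'/'0' pairs and the four masked-out bits become the always-hit (0,0) pair.

#include <stdint.h>
#include <stdio.h>

/* Local copies of the calc_x()/calc_y() macros from hclge_main.h,
 * without the typeof() guard, for a standalone userspace build.
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) ((y) = ((k) ^ ~(v)) & (k))

int main(void)
{
	uint8_t key = 0xA5;	/* value the rule should match */
	uint8_t mask = 0xF0;	/* only the high nibble is compared */
	uint8_t x, y;

	calc_x(x, key, mask);	/* x = 0x50 */
	calc_y(y, key, mask);	/* y = 0xA0 */

	/* Per the table above: mask bit 1 + key bit 1 -> (x,y) = (0,1), match '1';
	 * mask bit 1 + key bit 0 -> (1,0), match '0';
	 * mask bit 0 -> (0,0), always hit (don't care).
	 */
	printf("x=0x%02x y=0x%02x\n", x, y);
	return 0;
}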