Diffstat (limited to 'drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c')

 drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 363
 1 file changed, 329 insertions(+), 34 deletions(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 8c97106bdd1c..77a13fb555fb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -1,15 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+/* Marvell RVU Ethernet driver
*
* Copyright (C) 2020 Marvell.
+ *
*/
#include <net/ipv6.h>
+#include <linux/sort.h>
#include "otx2_common.h"
#define OTX2_DEFAULT_ACTION 0x1
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
+
struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
@@ -18,14 +22,19 @@ struct otx2_flow {
bool is_vf;
u8 rss_ctx_id;
int vf;
+ bool dmac_filter;
+};
+
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
};
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
{
devm_kfree(pfvf->dev, flow_cfg->flow_ent);
flow_cfg->flow_ent = NULL;
- flow_cfg->ntuple_max_flows = 0;
- flow_cfg->tc_max_flows = 0;
+ flow_cfg->max_flows = 0;
}
static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
@@ -34,11 +43,11 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
struct npc_mcam_free_entry_req *req;
int ent, err;
- if (!flow_cfg->ntuple_max_flows)
+ if (!flow_cfg->max_flows)
return 0;
mutex_lock(&pfvf->mbox.lock);
- for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
+ for (ent = 0; ent < flow_cfg->max_flows; ent++) {
req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
if (!req)
break;
@@ -55,7 +64,12 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
return 0;
}
-static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
+static int mcam_entry_cmp(const void *a, const void *b)
+{
+ return *(u16 *)a - *(u16 *)b;
+}
+
+int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
@@ -70,8 +84,12 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
sizeof(u16), GFP_KERNEL);
- if (!flow_cfg->flow_ent)
+ if (!flow_cfg->flow_ent) {
+ netdev_err(pfvf->netdev,
+ "%s: Unable to allocate memory for flow entries\n",
+ __func__);
return -ENOMEM;
+ }
mutex_lock(&pfvf->mbox.lock);
@@ -86,8 +104,14 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
req->contig = false;
req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
- req->priority = NPC_MCAM_HIGHER_PRIO;
- req->ref_entry = flow_cfg->def_ent[0];
+
+ /* Allocate higher-priority entries for PFs, so that VF
+ * entries land above the PF's entries in the MCAM.
+ */
+ if (!is_otx2_vf(pfvf->pcifunc)) {
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = flow_cfg->def_ent[0];
+ }
/* Send message to AF */
if (otx2_sync_mbox_msg(&pfvf->mbox))
@@ -108,22 +132,34 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
break;
}
+ /* Multiple MCAM entry alloc requests could result in non-sequential
+ * MCAM entries in the flow_ent[] array. Sort them in ascending order,
+ * otherwise the user-installed ntuple filter index and the MCAM entry
+ * index will not be in sync.
+ */
+ if (allocated)
+ sort(&flow_cfg->flow_ent[0], allocated,
+ sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);
+
exit:
mutex_unlock(&pfvf->mbox.lock);
- flow_cfg->ntuple_offset = 0;
- flow_cfg->ntuple_max_flows = allocated;
- flow_cfg->tc_max_flows = allocated;
+ flow_cfg->max_flows = allocated;
+
+ if (allocated) {
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ }
if (allocated != count)
netdev_info(pfvf->netdev,
- "Unable to allocate %d MCAM entries for ntuple, got %d\n",
+ "Unable to allocate %d MCAM entries, got only %d\n",
count, allocated);
-
return allocated;
}
+EXPORT_SYMBOL(otx2_alloc_mcam_entries);
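
As a minimal userspace sketch of the sorting step above (using qsort(3) in place of the kernel's sort(), with illustrative entry numbers), ascending order is what keeps ntuple filter location i pointing at the i-th lowest MCAM entry:

#include <stdio.h>
#include <stdlib.h>

/* Same comparator logic as mcam_entry_cmp() above */
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(const unsigned short *)a - *(const unsigned short *)b;
}

int main(void)
{
	/* Entries as they might arrive from several AF alloc replies */
	unsigned short flow_ent[] = { 230, 221, 222, 200, 201 };
	size_t i, n = sizeof(flow_ent) / sizeof(flow_ent[0]);

	qsort(flow_ent, n, sizeof(flow_ent[0]), mcam_entry_cmp);

	/* Filter location i now maps to the i-th lowest MCAM entry */
	for (i = 0; i < n; i++)
		printf("filter %zu -> MCAM entry %u\n", i, (unsigned)flow_ent[i]);
	return 0;
}
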
-int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
+static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
@@ -183,18 +219,35 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
/* Allocate entries for Ntuple filters */
- count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
if (count <= 0) {
otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
return 0;
}
- pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
return 0;
}
+int otx2vf_mcam_flow_init(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg;
+
+ pfvf->flow_cfg = devm_kzalloc(pfvf->dev,
+ sizeof(struct otx2_flow_config),
+ GFP_KERNEL);
+ if (!pfvf->flow_cfg)
+ return -ENOMEM;
+
+ flow_cfg = pfvf->flow_cfg;
+ INIT_LIST_HEAD(&flow_cfg->flow_list);
+ flow_cfg->max_flows = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2vf_mcam_flow_init);
+
int otx2_mcam_flow_init(struct otx2_nic *pf)
{
int err;
@@ -206,7 +259,10 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
- err = otx2_alloc_mcam_entries(pf);
+ /* Allocate bare minimum number of MCAM entries needed for
+ * unicast and ntuple filters.
+ */
+ err = otx2_mcam_entry_init(pf);
if (err)
return err;
@@ -219,6 +275,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->mac_table)
return -ENOMEM;
+ otx2_dmacflt_get_max_cnt(pf);
+
+ /* No CGX/RPM DMAC filters are available to this PF */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u8) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
return 0;
}
@@ -226,6 +298,7 @@ void otx2_mcam_flow_del(struct otx2_nic *pf)
{
otx2_destroy_mcam_flows(pf);
}
+EXPORT_SYMBOL(otx2_mcam_flow_del);
/* On success adds mcam entry
* On failure enables promiscuous mode
@@ -280,6 +353,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
+ if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+ "Add %pM to CGX/RPM DMAC filters list as well\n",
+ mac);
+
return otx2_do_add_macfilter(pf, mac);
}
@@ -351,12 +430,26 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
+int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (!flow_cfg)
+ return 0;
+
+ if (flow_cfg->nr_flows == flow_cfg->max_flows ||
+ bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->max_flows;
+}
+EXPORT_SYMBOL(otx2_get_maxflows);
+
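
A worked example with illustrative sizes: if max_flows is 64 and dmacflt_max_flows is 8, otx2_get_maxflows() reports 64 slots while the ntuple table still has room and no DMAC filter is active, but 64 + 8 = 72 once either condition trips, so the DMAC slots become addressable as ethtool rule locations 64-71 (see fsp->location = max_flows + entry in otx2_add_flow() below).
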
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
{
struct otx2_flow *iter;
- if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
return -EINVAL;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@@ -378,7 +471,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
int idx = 0;
int err = 0;
- nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
while ((!err || err == -ENOENT) && idx < rule_cnt) {
err = otx2_get_flow(pfvf, nfc, location);
if (!err)
@@ -670,7 +763,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
return 0;
}
-int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
+static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req)
{
struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
@@ -694,7 +787,7 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
ether_addr_copy(pmask->dmac, eth_mask->h_dest);
req->features |= BIT_ULL(NPC_DMAC);
}
- if (eth_mask->h_proto) {
+ if (eth_hdr->h_proto) {
memcpy(&pkt->etype, &eth_hdr->h_proto,
sizeof(pkt->etype));
memcpy(&pmask->etype, &eth_mask->h_proto,
@@ -726,14 +819,31 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return -EOPNOTSUPP;
}
if (fsp->flow_type & FLOW_EXT) {
- if (fsp->m_ext.vlan_etype)
- return -EINVAL;
- if (fsp->m_ext.vlan_tci) {
- if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+ u16 vlan_etype;
+
+ if (fsp->m_ext.vlan_etype) {
+ /* Partial masks not supported */
+ if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF)
return -EINVAL;
- if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+
+ vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype);
+ /* Only ETH_P_8021Q and ETH_P_8021AD types supported */
+ if (vlan_etype != ETH_P_8021Q &&
+ vlan_etype != ETH_P_8021AD)
return -EINVAL;
+ memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype,
+ sizeof(pkt->vlan_etype));
+ memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype,
+ sizeof(pmask->vlan_etype));
+
+ if (vlan_etype == ETH_P_8021Q)
+ req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG);
+ else
+ req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG);
+ }
+
+ if (fsp->m_ext.vlan_tci) {
memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
sizeof(pkt->vlan_tci));
memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
@@ -760,6 +870,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return 0;
}
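
A hedged userspace sketch of what the new FLOW_EXT checks above expect (illustrative values; the field names come from struct ethtool_rx_flow_spec in <linux/ethtool.h>): the vlan_etype mask must be a full 0xFFFF and the type must be 0x8100 (ETH_P_8021Q) or 0x88A8 (ETH_P_8021AD).

#include <arpa/inet.h>		/* htons, ntohs */
#include <linux/ethtool.h>	/* struct ethtool_rx_flow_spec, FLOW_EXT */
#include <linux/if_ether.h>	/* ETH_P_8021AD */
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct ethtool_rx_flow_spec fsp;

	memset(&fsp, 0, sizeof(fsp));
	fsp.flow_type = ETHER_FLOW | FLOW_EXT;

	/* Match S-tagged (802.1ad) traffic: the driver insists on a
	 * full 0xFFFF vlan_etype mask and accepts only these two types */
	fsp.h_ext.vlan_etype = htons(ETH_P_8021AD);
	fsp.m_ext.vlan_etype = htons(0xFFFF);

	/* Match VLAN ID 100 (mask covers the 12 VID bits of the TCI) */
	fsp.h_ext.vlan_tci = htons(100);
	fsp.m_ext.vlan_tci = htons(0x0FFF);

	printf("vlan_etype=0x%04x\n", ntohs(fsp.h_ext.vlan_etype));
	return 0;
}
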
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+ /* CGX/RPM block DMAC filtering is configured for white-listing;
+ * check for an action other than DROP
+ */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
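
For context, a minimal userspace sketch (the install_dmac_rule() helper name is made up) of an ethtool ioctl that would satisfy the checks above: ETHER_FLOW, a queue action rather than RX_CLS_FLOW_DISC, no VF bits in the ring cookie, a valid destination MAC, and an all-zero destination mask.

#include <linux/ethtool.h>	/* ETHTOOL_SRXCLSRLINS, struct ethtool_rxnfc */
#include <linux/sockios.h>	/* SIOCETHTOOL */
#include <net/if.h>		/* struct ifreq, IFNAMSIZ */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

/* Install an exact-DMAC ntuple rule steering to RX queue 'rxq' */
static int install_dmac_rule(const char *ifname,
			     const unsigned char mac[6], unsigned int rxq)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = ETHER_FLOW;
	nfc.fs.location = 0;		/* rule index */
	nfc.fs.ring_cookie = rxq;	/* not RX_CLS_FLOW_DISC, no VF bits */
	memcpy(nfc.fs.h_u.ether_spec.h_dest, mac, 6);
	/* h_dest mask left all-zero: together with a valid destination
	 * address this is what otx2_is_flow_rule_dmacfilter() treats as
	 * a CGX/RPM DMAC filter candidate */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return ret;
}

Rules installed this way can then be listed with ethtool -u <dev> and removed with ethtool -U <dev> delete <location>.
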
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
@@ -794,6 +930,7 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
if (flow->flow_spec.flow_type & FLOW_RSS) {
req->op = NIX_RX_ACTIONOP_RSS;
req->index = flow->rss_ctx_id;
+ req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
} else {
req->op = NIX_RX_ACTIONOP_UCAST;
req->index = ethtool_get_flow_spec_ring(ring_cookie);
@@ -818,14 +955,52 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct otx2_flow *pf_mac;
+ struct ethhdr *eth_hdr;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->dmac_filter = true;
+ pf_mac->location = pfvf->flow_cfg->max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
+ set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
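
A small sketch (plain userspace bit ops standing in for the kernel bitmap helpers, with an illustrative filter count) of the slot bookkeeping used here: bit 0 of dmacflt_bmap is reserved for the PF's own MAC the first time any DMAC filter is installed, and user rules then take the lowest free slot.

#include <stdio.h>

int main(void)
{
	unsigned long bmap = 0;	/* stands in for dmacflt_bmap */
	int slot, max = 8;	/* stands in for dmacflt_max_flows */

	/* First user rule arriving: reserve slot 0 for the PF MAC,
	 * mirroring otx2_add_flow_with_pfmac() */
	if (!(bmap & 1UL))
		bmap |= 1UL;

	/* The user rule itself takes the lowest free slot
	 * (find_first_zero_bit() in the kernel) */
	for (slot = 0; slot < max; slot++) {
		if (!(bmap & (1UL << slot))) {
			bmap |= 1UL << slot;
			break;
		}
	}
	printf("user rule -> DMAC slot %d\n", slot);	/* prints 1 */
	return 0;
}
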
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
bool new = false;
+ int err = 0;
u32 ring;
- int err;
+
+ if (!flow_cfg->max_flows) {
+ netdev_err(pfvf->netdev,
+ "Ntuple rule count is 0, allocate and retry\n");
+ return -EINVAL;
+ }
ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
@@ -834,12 +1009,12 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (fsp->location >= flow_cfg->ntuple_max_flows)
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, fsp->location);
if (!flow) {
- flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
@@ -852,8 +1027,56 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (fsp->flow_type & FLOW_RSS)
flow->rss_ctx_id = nfc->rss_context;
- err = otx2_add_flow_msg(pfvf, flow);
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->dmac_filter)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert the rule %d as max allowed dmac filters are %d\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* Install the PF MAC address into the DMAC filter list */
+ if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->dmac_filter = true;
+ flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->max_flows) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
+ flow->location,
+ flow_cfg->max_flows - 1);
+ err = -EINVAL;
+ } else {
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
if (err) {
+ if (err == MBOX_MSG_INVALID)
+ err = -EINVAL;
if (new)
kfree(flow);
return err;
@@ -890,20 +1113,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
return err;
}
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct otx2_flow *flow;
int err;
- if (location >= flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, location);
if (!flow)
return -ENOENT;
- err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (flow->dmac_filter) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Users may not remove the DMAC filter for the interface's own MAC */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ /* If all DMAC filters are removed, delete the MAC filter with the
+ * interface MAC address and configure the CGX/RPM block in
+ * promiscuous mode
+ */
+ if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
if (err)
return err;
@@ -940,6 +1213,9 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return 0;
+ if (!flow_cfg->max_flows)
+ return 0;
+
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
if (!req) {
@@ -948,7 +1224,7 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
}
req->start = flow_cfg->flow_ent[0];
- req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
+ req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1];
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -1100,3 +1376,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
mutex_unlock(&pf->mbox.lock);
return rsp_hdr->rc;
}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}