author      David S. Miller <davem@davemloft.net>  2023-12-20 14:27:21 +0300
committer   David S. Miller <davem@davemloft.net>  2023-12-20 14:27:21 +0300
commit      d7a39d399a6dbc52dedde686e5dd40a3ae30f19f (patch)
tree        9d785de8d275bd3146d8698e84401687588e5387
parent      b6895d0ac9d7a3d29f9f238a2688b3b66da71692 (diff)
parent      c3e87a7fcd0bb5820ca6db9b385bbfacb556d083 (diff)
download    linux-d7a39d399a6dbc52dedde686e5dd40a3ae30f19f.tar.xz
Merge branch 'bridge-mdb-bulk-delete'
Ido Schimmel says:

====================
Add MDB bulk deletion support

This patchset adds MDB bulk deletion support, allowing user space to request
the deletion of matching entries instead of dumping the entire MDB and issuing
a separate deletion request for each matching entry. Support is added in both
the bridge and VXLAN drivers in a similar fashion to the existing FDB bulk
deletion support.

The parameters according to which bulk deletion can be performed are similar
to the FDB ones, namely: Destination port, VLAN ID, state (e.g., "permanent"),
routing protocol, source / destination VNI, destination IP and UDP port.
Flushing based on flags (e.g., "offload", "fast_leave", "added_by_star_ex",
"blocked") is not currently supported, but can be added in the future, if a
use case arises.

Patch #1 adds a new uAPI attribute to allow specifying the state mask
according to which bulk deletion will be performed, if any.

Patch #2 adds a new policy according to which bulk deletion requests (with
'NLM_F_BULK' flag set) will be parsed.

Patches #3-#4 add a new NDO for MDB bulk deletion and invoke it from the
rtnetlink code when a bulk deletion request is made.

Patches #5-#6 implement the MDB bulk deletion NDO in the bridge and VXLAN
drivers, respectively.

Patch #7 allows user space to issue MDB bulk deletion requests by no longer
rejecting the 'NLM_F_BULK' flag when it is set in 'RTM_DELMDB' requests.

Patches #8-#9 add selftests for both drivers, for both good and bad flows.

iproute2 changes can be found here [1].

[1] https://github.com/idosch/iproute2/tree/submit/mdb_flush_v1
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
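For illustration only: user space drives this functionality through iproute2's
"bridge mdb flush" command (see [1] above). A few representative invocations,
assuming a bridge named br0 with member port swp1 and a VXLAN device named vx0
(example device names, matching the selftests below):

  # Flush all MDB entries on the bridge.
  bridge mdb flush dev br0

  # Flush only entries matching a given port, VLAN ID or state.
  bridge mdb flush dev br0 port swp1
  bridge mdb flush dev br0 vid 10
  bridge mdb flush dev br0 permanent

  # Flush VXLAN MDB entries by source VNI or by remote destination IP.
  bridge mdb flush dev vx0 src_vni 10010
  bridge mdb flush dev vx0 dst 198.51.100.1

Each command results in a single RTM_DELMDB request with the NLM_F_BULK flag
set, which the kernel dispatches to the device's new ndo_mdb_del_bulk callback.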
-rw-r--r--  drivers/net/vxlan/vxlan_core.c | 1
-rw-r--r--  drivers/net/vxlan/vxlan_mdb.c | 174
-rw-r--r--  drivers/net/vxlan/vxlan_private.h | 2
-rw-r--r--  include/linux/netdevice.h | 6
-rw-r--r--  include/uapi/linux/if_bridge.h | 1
-rw-r--r--  net/bridge/br_device.c | 1
-rw-r--r--  net/bridge/br_mdb.c | 133
-rw-r--r--  net/bridge/br_private.h | 8
-rw-r--r--  net/core/rtnetlink.c | 62
-rwxr-xr-x  tools/testing/selftests/net/forwarding/bridge_mdb.sh | 191
-rwxr-xr-x  tools/testing/selftests/net/test_vxlan_mdb.sh | 201
11 files changed, 749 insertions, 31 deletions
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 764ea02ff911..16106e088c63 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -3235,6 +3235,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_fdb_get = vxlan_fdb_get,
.ndo_mdb_add = vxlan_mdb_add,
.ndo_mdb_del = vxlan_mdb_del,
+ .ndo_mdb_del_bulk = vxlan_mdb_del_bulk,
.ndo_mdb_dump = vxlan_mdb_dump,
.ndo_mdb_get = vxlan_mdb_get,
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
diff --git a/drivers/net/vxlan/vxlan_mdb.c b/drivers/net/vxlan/vxlan_mdb.c
index eb4c580b5cee..60eb95a06d55 100644
--- a/drivers/net/vxlan/vxlan_mdb.c
+++ b/drivers/net/vxlan/vxlan_mdb.c
@@ -74,6 +74,14 @@ struct vxlan_mdb_config {
u8 rt_protocol;
};
+struct vxlan_mdb_flush_desc {
+ union vxlan_addr remote_ip;
+ __be32 src_vni;
+ __be32 remote_vni;
+ __be16 remote_port;
+ u8 rt_protocol;
+};
+
static const struct rhashtable_params vxlan_mdb_rht_params = {
.head_offset = offsetof(struct vxlan_mdb_entry, rhnode),
.key_offset = offsetof(struct vxlan_mdb_entry, key),
@@ -1306,6 +1314,145 @@ int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
return err;
}
+static const struct nla_policy
+vxlan_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
+ [MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
+ [MDBE_ATTR_DST] = NLA_POLICY_RANGE(NLA_BINARY,
+ sizeof(struct in_addr),
+ sizeof(struct in6_addr)),
+ [MDBE_ATTR_DST_PORT] = { .type = NLA_U16 },
+ [MDBE_ATTR_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
+ [MDBE_ATTR_SRC_VNI] = NLA_POLICY_FULL_RANGE(NLA_U32, &vni_range),
+ [MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
+};
+
+static int vxlan_mdb_flush_desc_init(struct vxlan_dev *vxlan,
+ struct vxlan_mdb_flush_desc *desc,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
+{
+ struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
+ struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
+ int err;
+
+ if (entry->ifindex && entry->ifindex != vxlan->dev->ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid port net device");
+ return -EINVAL;
+ }
+
+ if (entry->vid) {
+ NL_SET_ERR_MSG_MOD(extack, "VID must not be specified");
+ return -EINVAL;
+ }
+
+ if (!tb[MDBA_SET_ENTRY_ATTRS])
+ return 0;
+
+ err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
+ tb[MDBA_SET_ENTRY_ATTRS],
+ vxlan_mdbe_attrs_del_bulk_pol, extack);
+ if (err)
+ return err;
+
+ if (mdbe_attrs[MDBE_ATTR_STATE_MASK]) {
+ u8 state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);
+
+ if ((state_mask & MDB_PERMANENT) && !(entry->state & MDB_PERMANENT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Only permanent MDB entries are supported");
+ return -EINVAL;
+ }
+ }
+
+ if (mdbe_attrs[MDBE_ATTR_RTPROT])
+ desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);
+
+ if (mdbe_attrs[MDBE_ATTR_DST])
+ vxlan_nla_get_addr(&desc->remote_ip, mdbe_attrs[MDBE_ATTR_DST]);
+
+ if (mdbe_attrs[MDBE_ATTR_DST_PORT])
+ desc->remote_port =
+ cpu_to_be16(nla_get_u16(mdbe_attrs[MDBE_ATTR_DST_PORT]));
+
+ if (mdbe_attrs[MDBE_ATTR_VNI])
+ desc->remote_vni =
+ cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_VNI]));
+
+ if (mdbe_attrs[MDBE_ATTR_SRC_VNI])
+ desc->src_vni =
+ cpu_to_be32(nla_get_u32(mdbe_attrs[MDBE_ATTR_SRC_VNI]));
+
+ return 0;
+}
+
+static void vxlan_mdb_remotes_flush(struct vxlan_dev *vxlan,
+ struct vxlan_mdb_entry *mdb_entry,
+ const struct vxlan_mdb_flush_desc *desc)
+{
+ struct vxlan_mdb_remote *remote, *tmp;
+
+ list_for_each_entry_safe(remote, tmp, &mdb_entry->remotes, list) {
+ struct vxlan_rdst *rd = rtnl_dereference(remote->rd);
+ __be32 remote_vni;
+
+ if (desc->remote_ip.sa.sa_family &&
+ !vxlan_addr_equal(&desc->remote_ip, &rd->remote_ip))
+ continue;
+
+ /* Encapsulation is performed with source VNI if remote VNI
+ * is not set.
+ */
+ remote_vni = rd->remote_vni ? : mdb_entry->key.vni;
+ if (desc->remote_vni && desc->remote_vni != remote_vni)
+ continue;
+
+ if (desc->remote_port && desc->remote_port != rd->remote_port)
+ continue;
+
+ if (desc->rt_protocol &&
+ desc->rt_protocol != remote->rt_protocol)
+ continue;
+
+ vxlan_mdb_remote_del(vxlan, mdb_entry, remote);
+ }
+}
+
+static void vxlan_mdb_flush(struct vxlan_dev *vxlan,
+ const struct vxlan_mdb_flush_desc *desc)
+{
+ struct vxlan_mdb_entry *mdb_entry;
+ struct hlist_node *tmp;
+
+ /* The removal of an entry cannot trigger the removal of another entry
+ * since entries are always added to the head of the list.
+ */
+ hlist_for_each_entry_safe(mdb_entry, tmp, &vxlan->mdb_list, mdb_node) {
+ if (desc->src_vni && desc->src_vni != mdb_entry->key.vni)
+ continue;
+
+ vxlan_mdb_remotes_flush(vxlan, mdb_entry, desc);
+ /* Entry will only be removed if its remotes list is empty. */
+ vxlan_mdb_entry_put(vxlan, mdb_entry);
+ }
+}
+
+int vxlan_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_mdb_flush_desc desc = {};
+ int err;
+
+ ASSERT_RTNL();
+
+ err = vxlan_mdb_flush_desc_init(vxlan, &desc, tb, extack);
+ if (err)
+ return err;
+
+ vxlan_mdb_flush(vxlan, &desc);
+
+ return 0;
+}
+
static const struct nla_policy vxlan_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
@@ -1575,29 +1722,6 @@ static void vxlan_mdb_check_empty(void *ptr, void *arg)
WARN_ON_ONCE(1);
}
-static void vxlan_mdb_remotes_flush(struct vxlan_dev *vxlan,
- struct vxlan_mdb_entry *mdb_entry)
-{
- struct vxlan_mdb_remote *remote, *tmp;
-
- list_for_each_entry_safe(remote, tmp, &mdb_entry->remotes, list)
- vxlan_mdb_remote_del(vxlan, mdb_entry, remote);
-}
-
-static void vxlan_mdb_entries_flush(struct vxlan_dev *vxlan)
-{
- struct vxlan_mdb_entry *mdb_entry;
- struct hlist_node *tmp;
-
- /* The removal of an entry cannot trigger the removal of another entry
- * since entries are always added to the head of the list.
- */
- hlist_for_each_entry_safe(mdb_entry, tmp, &vxlan->mdb_list, mdb_node) {
- vxlan_mdb_remotes_flush(vxlan, mdb_entry);
- vxlan_mdb_entry_put(vxlan, mdb_entry);
- }
-}
-
int vxlan_mdb_init(struct vxlan_dev *vxlan)
{
int err;
@@ -1613,7 +1737,9 @@ int vxlan_mdb_init(struct vxlan_dev *vxlan)
void vxlan_mdb_fini(struct vxlan_dev *vxlan)
{
- vxlan_mdb_entries_flush(vxlan);
+ struct vxlan_mdb_flush_desc desc = {};
+
+ vxlan_mdb_flush(vxlan, &desc);
WARN_ON_ONCE(vxlan->cfg.flags & VXLAN_F_MDB);
rhashtable_free_and_destroy(&vxlan->mdb_tbl, vxlan_mdb_check_empty,
NULL);
diff --git a/drivers/net/vxlan/vxlan_private.h b/drivers/net/vxlan/vxlan_private.h
index db679c380955..b35d96b78843 100644
--- a/drivers/net/vxlan/vxlan_private.h
+++ b/drivers/net/vxlan/vxlan_private.h
@@ -235,6 +235,8 @@ int vxlan_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack);
int vxlan_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
+int vxlan_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int vxlan_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid,
u32 seq, struct netlink_ext_ack *extack);
struct vxlan_mdb_entry *vxlan_mdb_entry_skb_get(struct vxlan_dev *vxlan,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 1b935ee341b4..75c7725e5e4f 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1329,6 +1329,9 @@ struct netdev_net_notifier {
* int (*ndo_mdb_del)(struct net_device *dev, struct nlattr *tb[],
* struct netlink_ext_ack *extack);
* Deletes the MDB entry from dev.
+ * int (*ndo_mdb_del_bulk)(struct net_device *dev, struct nlattr *tb[],
+ * struct netlink_ext_ack *extack);
+ * Bulk deletes MDB entries from dev.
* int (*ndo_mdb_dump)(struct net_device *dev, struct sk_buff *skb,
* struct netlink_callback *cb);
* Dumps MDB entries from dev. The first argument (marker) in the netlink
@@ -1611,6 +1614,9 @@ struct net_device_ops {
int (*ndo_mdb_del)(struct net_device *dev,
struct nlattr *tb[],
struct netlink_ext_ack *extack);
+ int (*ndo_mdb_del_bulk)(struct net_device *dev,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int (*ndo_mdb_dump)(struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb);
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 2e23f99dc0f1..a5b743a2f775 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -757,6 +757,7 @@ enum {
MDBE_ATTR_VNI,
MDBE_ATTR_IFINDEX,
MDBE_ATTR_SRC_VNI,
+ MDBE_ATTR_STATE_MASK,
__MDBE_ATTR_MAX,
};
#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 8f40de3af154..65cee0ad3c1b 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -471,6 +471,7 @@ static const struct net_device_ops br_netdev_ops = {
.ndo_fdb_get = br_fdb_get,
.ndo_mdb_add = br_mdb_add,
.ndo_mdb_del = br_mdb_del,
+ .ndo_mdb_del_bulk = br_mdb_del_bulk,
.ndo_mdb_dump = br_mdb_dump,
.ndo_mdb_get = br_mdb_get,
.ndo_bridge_getlink = br_getlink,
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 8cc526067bc2..bc37e47ad829 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -1412,6 +1412,139 @@ int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
return err;
}
+struct br_mdb_flush_desc {
+ u32 port_ifindex;
+ u16 vid;
+ u8 rt_protocol;
+ u8 state;
+ u8 state_mask;
+};
+
+static const struct nla_policy br_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
+ [MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
+ [MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
+};
+
+static int br_mdb_flush_desc_init(struct br_mdb_flush_desc *desc,
+ struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
+{
+ struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
+ struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
+ int err;
+
+ desc->port_ifindex = entry->ifindex;
+ desc->vid = entry->vid;
+ desc->state = entry->state;
+
+ if (!tb[MDBA_SET_ENTRY_ATTRS])
+ return 0;
+
+ err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
+ tb[MDBA_SET_ENTRY_ATTRS],
+ br_mdbe_attrs_del_bulk_pol, extack);
+ if (err)
+ return err;
+
+ if (mdbe_attrs[MDBE_ATTR_STATE_MASK])
+ desc->state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);
+
+ if (mdbe_attrs[MDBE_ATTR_RTPROT])
+ desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);
+
+ return 0;
+}
+
+static void br_mdb_flush_host(struct net_bridge *br,
+ struct net_bridge_mdb_entry *mp,
+ const struct br_mdb_flush_desc *desc)
+{
+ u8 state;
+
+ if (desc->port_ifindex && desc->port_ifindex != br->dev->ifindex)
+ return;
+
+ if (desc->rt_protocol)
+ return;
+
+ state = br_group_is_l2(&mp->addr) ? MDB_PERMANENT : 0;
+ if (desc->state_mask && (state & desc->state_mask) != desc->state)
+ return;
+
+ br_multicast_host_leave(mp, true);
+ if (!mp->ports && netif_running(br->dev))
+ mod_timer(&mp->timer, jiffies);
+}
+
+static void br_mdb_flush_pgs(struct net_bridge *br,
+ struct net_bridge_mdb_entry *mp,
+ const struct br_mdb_flush_desc *desc)
+{
+ struct net_bridge_port_group __rcu **pp;
+ struct net_bridge_port_group *p;
+
+ for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;) {
+ u8 state;
+
+ if (desc->port_ifindex &&
+ desc->port_ifindex != p->key.port->dev->ifindex) {
+ pp = &p->next;
+ continue;
+ }
+
+ if (desc->rt_protocol && desc->rt_protocol != p->rt_protocol) {
+ pp = &p->next;
+ continue;
+ }
+
+ state = p->flags & MDB_PG_FLAGS_PERMANENT ? MDB_PERMANENT : 0;
+ if (desc->state_mask &&
+ (state & desc->state_mask) != desc->state) {
+ pp = &p->next;
+ continue;
+ }
+
+ br_multicast_del_pg(mp, p, pp);
+ }
+}
+
+static void br_mdb_flush(struct net_bridge *br,
+ const struct br_mdb_flush_desc *desc)
+{
+ struct net_bridge_mdb_entry *mp;
+
+ spin_lock_bh(&br->multicast_lock);
+
+ /* Safe variant is not needed because entries are removed from the list
+ * upon group timer expiration or bridge deletion.
+ */
+ hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
+ if (desc->vid && desc->vid != mp->addr.vid)
+ continue;
+
+ br_mdb_flush_host(br, mp, desc);
+ br_mdb_flush_pgs(br, mp, desc);
+ }
+
+ spin_unlock_bh(&br->multicast_lock);
+}
+
+int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
+{
+ struct net_bridge *br = netdev_priv(dev);
+ struct br_mdb_flush_desc desc = {};
+ int err;
+
+ err = br_mdb_flush_desc_init(&desc, tb, extack);
+ if (err)
+ return err;
+
+ br_mdb_flush(br, &desc);
+
+ return 0;
+}
+
static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
sizeof(struct in_addr),
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 051ea81864ac..b0a92c344722 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -1022,6 +1022,8 @@ int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
struct netlink_ext_ack *extack);
int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack);
+int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
+ struct netlink_ext_ack *extack);
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
struct netlink_callback *cb);
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
@@ -1430,6 +1432,12 @@ static inline int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
return -EOPNOTSUPP;
}
+static inline int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
struct netlink_callback *cb)
{
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 94c4572512b8..5f6ed6da3cfc 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -6410,17 +6410,64 @@ static int rtnl_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
return dev->netdev_ops->ndo_mdb_add(dev, tb, nlh->nlmsg_flags, extack);
}
+static int rtnl_validate_mdb_entry_del_bulk(const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct br_mdb_entry *entry = nla_data(attr);
+ struct br_mdb_entry zero_entry = {};
+
+ if (nla_len(attr) != sizeof(struct br_mdb_entry)) {
+ NL_SET_ERR_MSG_ATTR(extack, attr, "Invalid attribute length");
+ return -EINVAL;
+ }
+
+ if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
+ NL_SET_ERR_MSG(extack, "Unknown entry state");
+ return -EINVAL;
+ }
+
+ if (entry->flags) {
+ NL_SET_ERR_MSG(extack, "Entry flags cannot be set");
+ return -EINVAL;
+ }
+
+ if (entry->vid >= VLAN_N_VID - 1) {
+ NL_SET_ERR_MSG(extack, "Invalid entry VLAN id");
+ return -EINVAL;
+ }
+
+ if (memcmp(&entry->addr, &zero_entry.addr, sizeof(entry->addr))) {
+ NL_SET_ERR_MSG(extack, "Entry address cannot be set");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct nla_policy mdba_del_bulk_policy[MDBA_SET_ENTRY_MAX + 1] = {
+ [MDBA_SET_ENTRY] = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
+ rtnl_validate_mdb_entry_del_bulk,
+ sizeof(struct br_mdb_entry)),
+ [MDBA_SET_ENTRY_ATTRS] = { .type = NLA_NESTED },
+};
+
static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ bool del_bulk = !!(nlh->nlmsg_flags & NLM_F_BULK);
struct nlattr *tb[MDBA_SET_ENTRY_MAX + 1];
struct net *net = sock_net(skb->sk);
struct br_port_msg *bpm;
struct net_device *dev;
int err;
- err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
- MDBA_SET_ENTRY_MAX, mdba_policy, extack);
+ if (!del_bulk)
+ err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
+ MDBA_SET_ENTRY_MAX, mdba_policy,
+ extack);
+ else
+ err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY_MAX,
+ mdba_del_bulk_policy, extack);
if (err)
return err;
@@ -6441,6 +6488,14 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
return -EINVAL;
}
+ if (del_bulk) {
+ if (!dev->netdev_ops->ndo_mdb_del_bulk) {
+ NL_SET_ERR_MSG(extack, "Device does not support MDB bulk deletion");
+ return -EOPNOTSUPP;
+ }
+ return dev->netdev_ops->ndo_mdb_del_bulk(dev, tb, extack);
+ }
+
if (!dev->netdev_ops->ndo_mdb_del) {
NL_SET_ERR_MSG(extack, "Device does not support MDB operations");
return -EOPNOTSUPP;
@@ -6686,5 +6741,6 @@ void __init rtnetlink_init(void)
rtnl_register(PF_BRIDGE, RTM_GETMDB, rtnl_mdb_get, rtnl_mdb_dump, 0);
rtnl_register(PF_BRIDGE, RTM_NEWMDB, rtnl_mdb_add, NULL, 0);
- rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL, 0);
+ rtnl_register(PF_BRIDGE, RTM_DELMDB, rtnl_mdb_del, NULL,
+ RTNL_FLAG_BULK_DEL_SUPPORTED);
}
diff --git a/tools/testing/selftests/net/forwarding/bridge_mdb.sh b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
index e4e3e9405056..61348f71728c 100755
--- a/tools/testing/selftests/net/forwarding/bridge_mdb.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_mdb.sh
@@ -803,11 +803,198 @@ cfg_test_dump()
cfg_test_dump_common "L2" l2_grps_get
}
+# Check flush functionality with different parameters.
+cfg_test_flush()
+{
+ local num_entries
+
+ # Add entries with different attributes and check that they are all
+ # flushed when the flush command is given with no parameters.
+
+ # Different port.
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 10
+ bridge mdb add dev br0 port $swp2 grp 239.1.1.2 vid 10
+
+ # Different VLAN ID.
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.3 vid 10
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.4 vid 20
+
+ # Different routing protocol.
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.5 vid 10 proto bgp
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.6 vid 10 proto zebra
+
+ # Different state.
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.7 vid 10 permanent
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.8 vid 10 temp
+
+ bridge mdb flush dev br0
+ num_entries=$(bridge mdb show dev br0 | wc -l)
+ [[ $num_entries -eq 0 ]]
+ check_err $? 0 "Not all entries flushed after flush all"
+
+ # Check that when flushing by port only entries programmed with the
+ # specified port are flushed and the rest are not.
+
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 10
+ bridge mdb add dev br0 port $swp2 grp 239.1.1.1 vid 10
+ bridge mdb add dev br0 port br0 grp 239.1.1.1 vid 10
+
+ bridge mdb flush dev br0 port $swp1
+
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp1"
+ check_fail $? "Entry not flushed by specified port"
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp2"
+ check_err $? "Entry flushed by wrong port"
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port br0"
+ check_err $? "Host entry flushed by wrong port"
+
+ bridge mdb flush dev br0 port br0
+
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port br0"
+ check_fail $? "Host entry not flushed by specified port"
+
+ bridge mdb flush dev br0
+
+ # Check that when flushing by VLAN ID only entries programmed with the
+ # specified VLAN ID are flushed and the rest are not.
+
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 10
+ bridge mdb add dev br0 port $swp2 grp 239.1.1.1 vid 10
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 20
+ bridge mdb add dev br0 port $swp2 grp 239.1.1.1 vid 20
+
+ bridge mdb flush dev br0 vid 10
+
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 &> /dev/null
+ check_fail $? "Entry not flushed by specified VLAN ID"
+ bridge mdb get dev br0 grp 239.1.1.1 vid 20 &> /dev/null
+ check_err $? "Entry flushed by wrong VLAN ID"
+
+ bridge mdb flush dev br0
+
+ # Check that all permanent entries are flushed when "permanent" is
+ # specified and that temporary entries are not.
+
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.1 permanent vid 10
+ bridge mdb add dev br0 port $swp2 grp 239.1.1.1 temp vid 10
+
+ bridge mdb flush dev br0 permanent
+
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp1"
+ check_fail $? "Entry not flushed by \"permanent\" state"
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp2"
+ check_err $? "Entry flushed by wrong state (\"permanent\")"
+
+ bridge mdb flush dev br0
+
+ # Check that all temporary entries are flushed when "nopermanent" is
+ # specified and that permanent entries are not.
+
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.1 permanent vid 10
+ bridge mdb add dev br0 port $swp2 grp 239.1.1.1 temp vid 10
+
+ bridge mdb flush dev br0 nopermanent
+
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp1"
+ check_err $? "Entry flushed by wrong state (\"nopermanent\")"
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp2"
+ check_fail $? "Entry not flushed by \"nopermanent\" state"
+
+ bridge mdb flush dev br0
+
+ # Check that L2 host entries are not flushed when "nopermanent" is
+ # specified, but flushed when "permanent" is specified.
+
+ bridge mdb add dev br0 port br0 grp 01:02:03:04:05:06 permanent vid 10
+
+ bridge mdb flush dev br0 nopermanent
+
+ bridge mdb get dev br0 grp 01:02:03:04:05:06 vid 10 &> /dev/null
+ check_err $? "L2 host entry flushed by wrong state (\"nopermanent\")"
+
+ bridge mdb flush dev br0 permanent
+
+ bridge mdb get dev br0 grp 01:02:03:04:05:06 vid 10 &> /dev/null
+ check_fail $? "L2 host entry not flushed by \"permanent\" state"
+
+ bridge mdb flush dev br0
+
+ # Check that IPv4 host entries are not flushed when "permanent" is
+ # specified, but flushed when "nopermanent" is specified.
+
+ bridge mdb add dev br0 port br0 grp 239.1.1.1 temp vid 10
+
+ bridge mdb flush dev br0 permanent
+
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 &> /dev/null
+ check_err $? "IPv4 host entry flushed by wrong state (\"permanent\")"
+
+ bridge mdb flush dev br0 nopermanent
+
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 &> /dev/null
+ check_fail $? "IPv4 host entry not flushed by \"nopermanent\" state"
+
+ bridge mdb flush dev br0
+
+ # Check that IPv6 host entries are not flushed when "permanent" is
+ # specified, but flushed when "nopermanent" is specified.
+
+ bridge mdb add dev br0 port br0 grp ff0e::1 temp vid 10
+
+ bridge mdb flush dev br0 permanent
+
+ bridge mdb get dev br0 grp ff0e::1 vid 10 &> /dev/null
+ check_err $? "IPv6 host entry flushed by wrong state (\"permanent\")"
+
+ bridge mdb flush dev br0 nopermanent
+
+ bridge mdb get dev br0 grp ff0e::1 vid 10 &> /dev/null
+ check_fail $? "IPv6 host entry not flushed by \"nopermanent\" state"
+
+ bridge mdb flush dev br0
+
+ # Check that when flushing by routing protocol only entries programmed
+ # with the specified routing protocol are flushed and the rest are not.
+
+ bridge mdb add dev br0 port $swp1 grp 239.1.1.1 vid 10 proto bgp
+ bridge mdb add dev br0 port $swp2 grp 239.1.1.1 vid 10 proto zebra
+ bridge mdb add dev br0 port br0 grp 239.1.1.1 vid 10
+
+ bridge mdb flush dev br0 proto bgp
+
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp1"
+ check_fail $? "Entry not flushed by specified routing protocol"
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port $swp2"
+ check_err $? "Entry flushed by wrong routing protocol"
+ bridge mdb get dev br0 grp 239.1.1.1 vid 10 | grep -q "port br0"
+ check_err $? "Host entry flushed by wrong routing protocol"
+
+ bridge mdb flush dev br0
+
+ # Test that an error is returned when trying to flush using unsupported
+ # parameters.
+
+ bridge mdb flush dev br0 src_vni 10 &> /dev/null
+ check_fail $? "Managed to flush by source VNI"
+
+ bridge mdb flush dev br0 dst 198.51.100.1 &> /dev/null
+ check_fail $? "Managed to flush by destination IP"
+
+ bridge mdb flush dev br0 dst_port 4789 &> /dev/null
+ check_fail $? "Managed to flush by UDP destination port"
+
+ bridge mdb flush dev br0 vni 10 &> /dev/null
+ check_fail $? "Managed to flush by destination VNI"
+
+ log_test "Flush tests"
+}
+
cfg_test()
{
cfg_test_host
cfg_test_port
cfg_test_dump
+ cfg_test_flush
}
__fwd_test_host_ip()
@@ -1166,8 +1353,8 @@ ctrl_test()
ctrl_mldv2_is_in_test
}
-if ! bridge mdb help 2>&1 | grep -q "get"; then
- echo "SKIP: iproute2 too old, missing bridge mdb get support"
+if ! bridge mdb help 2>&1 | grep -q "flush"; then
+ echo "SKIP: iproute2 too old, missing bridge mdb flush support"
exit $ksft_skip
fi
diff --git a/tools/testing/selftests/net/test_vxlan_mdb.sh b/tools/testing/selftests/net/test_vxlan_mdb.sh
index 6725fd9157b9..84a05a9e46d8 100755
--- a/tools/testing/selftests/net/test_vxlan_mdb.sh
+++ b/tools/testing/selftests/net/test_vxlan_mdb.sh
@@ -79,6 +79,7 @@ CONTROL_PATH_TESTS="
dump_ipv6_ipv4
dump_ipv4_ipv6
dump_ipv6_ipv6
+ flush
"
DATA_PATH_TESTS="
@@ -968,6 +969,202 @@ dump_ipv6_ipv6()
dump_common $ns1 $local_addr $remote_prefix $fn
}
+flush()
+{
+ local num_entries
+
+ echo
+ echo "Control path: Flush"
+ echo "-------------------"
+
+ # Add entries with different attributes and check that they are all
+ # flushed when the flush command is given with no parameters.
+
+ # Different source VNI.
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.2 permanent dst 198.51.100.1 src_vni 10011"
+
+ # Different routing protocol.
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.3 permanent proto bgp dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.4 permanent proto zebra dst 198.51.100.1 src_vni 10010"
+
+ # Different destination IP.
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.5 permanent dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.6 permanent dst 198.51.100.2 src_vni 10010"
+
+ # Different destination port.
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.7 permanent dst 198.51.100.1 dst_port 11111 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.8 permanent dst 198.51.100.1 dst_port 22222 src_vni 10010"
+
+ # Different VNI.
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.9 permanent dst 198.51.100.1 vni 10010 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.10 permanent dst 198.51.100.1 vni 10020 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+ num_entries=$(bridge -n $ns1_v4 mdb show dev vx0 | wc -l)
+ [[ $num_entries -eq 0 ]]
+ log_test $? 0 "Flush all"
+
+ # Check that entries are flushed when port is specified as the VXLAN
+ # device and that an error is returned when port is specified as a
+ # different net device.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 port vx0"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010"
+ log_test $? 254 "Flush by port"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 port veth0"
+ log_test $? 255 "Flush by wrong port"
+
+ # Check that when flushing by source VNI only entries programmed with
+ # the specified source VNI are flushed and the rest are not.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.2 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.1 src_vni 10011"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.2 src_vni 10011"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010"
+ log_test $? 254 "Flush by specified source VNI"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10011"
+ log_test $? 0 "Flush by unspecified source VNI"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+
+ # Check that all entries are flushed when "permanent" is specified and
+ # that an error is returned when "nopermanent" is specified.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 permanent"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010"
+ log_test $? 254 "Flush by \"permanent\" state"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 nopermanent"
+ log_test $? 255 "Flush by \"nopermanent\" state"
+
+ # Check that when flushing by routing protocol only entries programmed
+ # with the specified routing protocol are flushed and the rest are not.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent proto bgp dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent proto zebra dst 198.51.100.2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 proto bgp"
+
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"proto bgp\""
+ log_test $? 1 "Flush by specified routing protocol"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"proto zebra\""
+ log_test $? 0 "Flush by unspecified routing protocol"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+
+ # Check that when flushing by destination IP only entries programmed
+ # with the specified destination IP are flushed and the rest are not.
+
+ # IPv4.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst 198.51.100.2"
+
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
+ log_test $? 1 "Flush by specified destination IP - IPv4"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
+ log_test $? 0 "Flush by unspecified destination IP - IPv4"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+
+ # IPv6.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 2001:db8:1000::1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 2001:db8:1000::2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst 2001:db8:1000::2"
+
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 2001:db8:1000::2"
+ log_test $? 1 "Flush by specified destination IP - IPv6"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 2001:db8:1000::1"
+ log_test $? 0 "Flush by unspecified destination IP - IPv6"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+
+ # Check that when flushing by UDP destination port only entries
+ # programmed with the specified port are flushed and the rest are not.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst_port 11111 dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst_port 22222 dst 198.51.100.2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst_port 11111"
+
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"dst_port 11111\""
+ log_test $? 1 "Flush by specified UDP destination port"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \"dst_port 22222\""
+ log_test $? 0 "Flush by unspecified UDP destination port"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+
+ # When not specifying a UDP destination port for an entry, traffic is
+ # encapsulated with the device's UDP destination port. Check that when
+ # flushing by the device's UDP destination port only entries programmed
+ # with this port are flushed and the rest are not.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst_port 22222 dst 198.51.100.2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 dst_port 4789"
+
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
+ log_test $? 1 "Flush by device's UDP destination port"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
+ log_test $? 0 "Flush by unspecified UDP destination port"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+
+ # Check that when flushing by destination VNI only entries programmed
+ # with the specified destination VNI are flushed and the rest are not.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent vni 20010 dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent vni 20011 dst 198.51.100.2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 vni 20010"
+
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \" vni 20010\""
+ log_test $? 1 "Flush by specified destination VNI"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep \" vni 20011\""
+ log_test $? 0 "Flush by unspecified destination VNI"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+
+ # When not specifying a destination VNI for an entry, traffic is
+ # encapsulated with the source VNI. Check that when flushing by a
+ # destination VNI that is equal to the source VNI only such entries are
+ # flushed and the rest are not.
+
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent dst 198.51.100.1 src_vni 10010"
+ run_cmd "bridge -n $ns1_v4 mdb add dev vx0 port vx0 grp 239.1.1.1 permanent vni 20010 dst 198.51.100.2 src_vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 vni 10010"
+
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.1"
+ log_test $? 1 "Flush by destination VNI equal to source VNI"
+ run_cmd "bridge -n $ns1_v4 -d -s mdb get dev vx0 grp 239.1.1.1 src_vni 10010 | grep 198.51.100.2"
+ log_test $? 0 "Flush by unspecified destination VNI"
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0"
+
+ # Test that an error is returned when trying to flush using VLAN ID.
+
+ run_cmd "bridge -n $ns1_v4 mdb flush dev vx0 vid 10"
+ log_test $? 255 "Flush by VLAN ID"
+}
+
################################################################################
# Tests - Data path
@@ -2292,9 +2489,9 @@ if [ ! -x "$(command -v jq)" ]; then
exit $ksft_skip
fi
-bridge mdb help 2>&1 | grep -q "get"
+bridge mdb help 2>&1 | grep -q "flush"
if [ $? -ne 0 ]; then
- echo "SKIP: iproute2 bridge too old, missing VXLAN MDB get support"
+ echo "SKIP: iproute2 bridge too old, missing VXLAN MDB flush support"
exit $ksft_skip
fi