From 6ca80638b90cec66547011ee1ef79e534589989a Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 23 Oct 2023 11:17:28 -0700 Subject: net: dsa: Use conduit and user terms Use more inclusive terms throughout the DSA subsystem by moving away from "master" which is replaced by "conduit" and "slave" which is replaced by "user". No functional changes. Acked-by: Rob Herring Acked-by: Stephen Hemminger Reviewed-by: Vladimir Oltean Signed-off-by: Florian Fainelli Link: https://lore.kernel.org/r/20231023181729.1191071-2-florian.fainelli@broadcom.com Signed-off-by: Jakub Kicinski --- net/core/dev_ioctl.c | 2 +- net/dsa/Makefile | 6 +- net/dsa/conduit.c | 475 ++++++ net/dsa/conduit.h | 22 + net/dsa/dsa.c | 224 +-- net/dsa/dsa.h | 12 +- net/dsa/master.c | 475 ------ net/dsa/master.h | 22 - net/dsa/netlink.c | 14 +- net/dsa/port.c | 124 +- net/dsa/port.h | 4 +- net/dsa/slave.c | 3727 -------------------------------------------- net/dsa/slave.h | 69 - net/dsa/switch.c | 20 +- net/dsa/switch.h | 8 +- net/dsa/tag.c | 10 +- net/dsa/tag.h | 26 +- net/dsa/tag_8021q.c | 22 +- net/dsa/tag_8021q.h | 2 +- net/dsa/tag_ar9331.c | 4 +- net/dsa/tag_brcm.c | 14 +- net/dsa/tag_dsa.c | 6 +- net/dsa/tag_gswip.c | 4 +- net/dsa/tag_hellcreek.c | 4 +- net/dsa/tag_ksz.c | 12 +- net/dsa/tag_lan9303.c | 4 +- net/dsa/tag_mtk.c | 4 +- net/dsa/tag_none.c | 6 +- net/dsa/tag_ocelot.c | 22 +- net/dsa/tag_ocelot_8021q.c | 12 +- net/dsa/tag_qca.c | 6 +- net/dsa/tag_rtl4_a.c | 6 +- net/dsa/tag_rtl8_4.c | 6 +- net/dsa/tag_rzn1_a5psw.c | 4 +- net/dsa/tag_sja1105.c | 30 +- net/dsa/tag_trailer.c | 4 +- net/dsa/tag_xrs700x.c | 4 +- net/dsa/user.c | 3727 ++++++++++++++++++++++++++++++++++++++++++++ net/dsa/user.h | 69 + 39 files changed, 4606 insertions(+), 4606 deletions(-) create mode 100644 net/dsa/conduit.c create mode 100644 net/dsa/conduit.h delete mode 100644 net/dsa/master.c delete mode 100644 net/dsa/master.h delete mode 100644 net/dsa/slave.c delete mode 100644 net/dsa/slave.h create mode 100644 net/dsa/user.c create mode 100644 net/dsa/user.h (limited to 'net') diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index b46aedc36939..feeddf95f450 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -382,7 +382,7 @@ static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) if (err) return err; - err = dsa_master_hwtstamp_validate(dev, &kernel_cfg, &extack); + err = dsa_conduit_hwtstamp_validate(dev, &kernel_cfg, &extack); if (err) { if (extack._msg) netdev_err(dev, "%s\n", extack._msg); diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 12e305824a96..8a1894a42552 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -8,16 +8,16 @@ endif # the core obj-$(CONFIG_NET_DSA) += dsa_core.o dsa_core-y += \ + conduit.o \ devlink.o \ dsa.o \ - master.o \ netlink.o \ port.o \ - slave.o \ switch.o \ tag.o \ tag_8021q.o \ - trace.o + trace.o \ + user.o # tagging formats obj-$(CONFIG_NET_DSA_TAG_AR9331) += tag_ar9331.o diff --git a/net/dsa/conduit.c b/net/dsa/conduit.c new file mode 100644 index 000000000000..3dfdb3cb47dc --- /dev/null +++ b/net/dsa/conduit.c @@ -0,0 +1,475 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Handling of a conduit device, switching frames via its switch fabric CPU port + * + * Copyright (c) 2017 Savoir-faire Linux Inc. 
+ * Vivien Didelot + */ + +#include +#include +#include +#include + +#include "conduit.h" +#include "dsa.h" +#include "port.h" +#include "tag.h" + +static int dsa_conduit_get_regs_len(struct net_device *dev) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; + struct dsa_switch *ds = cpu_dp->ds; + int port = cpu_dp->index; + int ret = 0; + int len; + + if (ops->get_regs_len) { + len = ops->get_regs_len(dev); + if (len < 0) + return len; + ret += len; + } + + ret += sizeof(struct ethtool_drvinfo); + ret += sizeof(struct ethtool_regs); + + if (ds->ops->get_regs_len) { + len = ds->ops->get_regs_len(ds, port); + if (len < 0) + return len; + ret += len; + } + + return ret; +} + +static void dsa_conduit_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *data) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; + struct dsa_switch *ds = cpu_dp->ds; + struct ethtool_drvinfo *cpu_info; + struct ethtool_regs *cpu_regs; + int port = cpu_dp->index; + int len; + + if (ops->get_regs_len && ops->get_regs) { + len = ops->get_regs_len(dev); + if (len < 0) + return; + regs->len = len; + ops->get_regs(dev, regs, data); + data += regs->len; + } + + cpu_info = (struct ethtool_drvinfo *)data; + strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver)); + data += sizeof(*cpu_info); + cpu_regs = (struct ethtool_regs *)data; + data += sizeof(*cpu_regs); + + if (ds->ops->get_regs_len && ds->ops->get_regs) { + len = ds->ops->get_regs_len(ds, port); + if (len < 0) + return; + cpu_regs->len = len; + ds->ops->get_regs(ds, port, cpu_regs, data); + } +} + +static void dsa_conduit_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + uint64_t *data) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; + struct dsa_switch *ds = cpu_dp->ds; + int port = cpu_dp->index; + int count = 0; + + if (ops->get_sset_count && ops->get_ethtool_stats) { + count = ops->get_sset_count(dev, ETH_SS_STATS); + ops->get_ethtool_stats(dev, stats, data); + } + + if (ds->ops->get_ethtool_stats) + ds->ops->get_ethtool_stats(ds, port, data + count); +} + +static void dsa_conduit_get_ethtool_phy_stats(struct net_device *dev, + struct ethtool_stats *stats, + uint64_t *data) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; + struct dsa_switch *ds = cpu_dp->ds; + int port = cpu_dp->index; + int count = 0; + + if (dev->phydev && !ops->get_ethtool_phy_stats) { + count = phy_ethtool_get_sset_count(dev->phydev); + if (count >= 0) + phy_ethtool_get_stats(dev->phydev, stats, data); + } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) { + count = ops->get_sset_count(dev, ETH_SS_PHY_STATS); + ops->get_ethtool_phy_stats(dev, stats, data); + } + + if (count < 0) + count = 0; + + if (ds->ops->get_ethtool_phy_stats) + ds->ops->get_ethtool_phy_stats(ds, port, data + count); +} + +static int dsa_conduit_get_sset_count(struct net_device *dev, int sset) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; + struct dsa_switch *ds = cpu_dp->ds; + int count = 0; + + if (sset == ETH_SS_PHY_STATS && dev->phydev && + !ops->get_ethtool_phy_stats) + count = phy_ethtool_get_sset_count(dev->phydev); + else if (ops->get_sset_count) + count = ops->get_sset_count(dev, sset); + + if (count < 0) + count = 0; + + if (ds->ops->get_sset_count) + count += 
ds->ops->get_sset_count(ds, cpu_dp->index, sset); + + return count; +} + +static void dsa_conduit_get_strings(struct net_device *dev, uint32_t stringset, + uint8_t *data) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; + struct dsa_switch *ds = cpu_dp->ds; + int port = cpu_dp->index; + int len = ETH_GSTRING_LEN; + int mcount = 0, count, i; + uint8_t pfx[4]; + uint8_t *ndata; + + snprintf(pfx, sizeof(pfx), "p%.2d", port); + /* We do not want to be NULL-terminated, since this is a prefix */ + pfx[sizeof(pfx) - 1] = '_'; + + if (stringset == ETH_SS_PHY_STATS && dev->phydev && + !ops->get_ethtool_phy_stats) { + mcount = phy_ethtool_get_sset_count(dev->phydev); + if (mcount < 0) + mcount = 0; + else + phy_ethtool_get_strings(dev->phydev, data); + } else if (ops->get_sset_count && ops->get_strings) { + mcount = ops->get_sset_count(dev, stringset); + if (mcount < 0) + mcount = 0; + ops->get_strings(dev, stringset, data); + } + + if (ds->ops->get_strings) { + ndata = data + mcount * len; + /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle + * the output after to prepend our CPU port prefix we + * constructed earlier + */ + ds->ops->get_strings(ds, port, stringset, ndata); + count = ds->ops->get_sset_count(ds, port, stringset); + if (count < 0) + return; + for (i = 0; i < count; i++) { + memmove(ndata + (i * len + sizeof(pfx)), + ndata + i * len, len - sizeof(pfx)); + memcpy(ndata + i * len, pfx, sizeof(pfx)); + } + } +} + +/* Deny PTP operations on conduit if there is at least one switch in the tree + * that is PTP capable. + */ +int __dsa_conduit_hwtstamp_validate(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + struct dsa_switch *ds = cpu_dp->ds; + struct dsa_switch_tree *dst; + struct dsa_port *dp; + + dst = ds->dst; + + list_for_each_entry(dp, &dst->ports, list) { + if (dsa_port_supports_hwtstamp(dp)) { + NL_SET_ERR_MSG(extack, + "HW timestamping not allowed on DSA conduit when switch supports the operation"); + return -EBUSY; + } + } + + return 0; +} + +static int dsa_conduit_ethtool_setup(struct net_device *dev) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + struct dsa_switch *ds = cpu_dp->ds; + struct ethtool_ops *ops; + + if (netif_is_lag_master(dev)) + return 0; + + ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL); + if (!ops) + return -ENOMEM; + + cpu_dp->orig_ethtool_ops = dev->ethtool_ops; + if (cpu_dp->orig_ethtool_ops) + memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops)); + + ops->get_regs_len = dsa_conduit_get_regs_len; + ops->get_regs = dsa_conduit_get_regs; + ops->get_sset_count = dsa_conduit_get_sset_count; + ops->get_ethtool_stats = dsa_conduit_get_ethtool_stats; + ops->get_strings = dsa_conduit_get_strings; + ops->get_ethtool_phy_stats = dsa_conduit_get_ethtool_phy_stats; + + dev->ethtool_ops = ops; + + return 0; +} + +static void dsa_conduit_ethtool_teardown(struct net_device *dev) +{ + struct dsa_port *cpu_dp = dev->dsa_ptr; + + if (netif_is_lag_master(dev)) + return; + + dev->ethtool_ops = cpu_dp->orig_ethtool_ops; + cpu_dp->orig_ethtool_ops = NULL; +} + +/* Keep the conduit always promiscuous if the tagging protocol requires that + * (garbles MAC DA) or if it doesn't support unicast filtering, case in which + * it would revert to promiscuous mode as soon as we call dev_uc_add() on it + * anyway. 
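
For context, the opt-in for the first case (garbled MAC DA) lives in the tagger's struct dsa_device_ops. A minimal sketch of a hypothetical tagger that rewrites the destination MAC and therefore needs the conduit kept promiscuous; the name, protocol value and elided hooks are illustrative, not part of this patch:

    static const struct dsa_device_ops example_tag_ops = {
        .name           = "example",
        .proto          = DSA_TAG_PROTO_NONE,   /* placeholder value */
        /* TX rewrites the MAC DA, so the conduit's unicast filter
         * (IFF_UNICAST_FLT) cannot be relied upon; stay promiscuous.
         */
        .promisc_on_conduit = true,
        /* .xmit, .rcv, .needed_headroom, ... elided */
    };
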
+ */ +static void dsa_conduit_set_promiscuity(struct net_device *dev, int inc) +{ + const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops; + + if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_conduit) + return; + + ASSERT_RTNL(); + + dev_set_promiscuity(dev, inc); +} + +static ssize_t tagging_show(struct device *d, struct device_attribute *attr, + char *buf) +{ + struct net_device *dev = to_net_dev(d); + struct dsa_port *cpu_dp = dev->dsa_ptr; + + return sysfs_emit(buf, "%s\n", + dsa_tag_protocol_to_str(cpu_dp->tag_ops)); +} + +static ssize_t tagging_store(struct device *d, struct device_attribute *attr, + const char *buf, size_t count) +{ + const struct dsa_device_ops *new_tag_ops, *old_tag_ops; + const char *end = strchrnul(buf, '\n'), *name; + struct net_device *dev = to_net_dev(d); + struct dsa_port *cpu_dp = dev->dsa_ptr; + size_t len = end - buf; + int err; + + /* Empty string passed */ + if (!len) + return -ENOPROTOOPT; + + name = kstrndup(buf, len, GFP_KERNEL); + if (!name) + return -ENOMEM; + + old_tag_ops = cpu_dp->tag_ops; + new_tag_ops = dsa_tag_driver_get_by_name(name); + kfree(name); + /* Bad tagger name? */ + if (IS_ERR(new_tag_ops)) + return PTR_ERR(new_tag_ops); + + if (new_tag_ops == old_tag_ops) + /* Drop the temporarily held duplicate reference, since + * the DSA switch tree uses this tagger. + */ + goto out; + + err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops, + old_tag_ops); + if (err) { + /* On failure the old tagger is restored, so we don't need the + * driver for the new one. + */ + dsa_tag_driver_put(new_tag_ops); + return err; + } + + /* On success we no longer need the module for the old tagging protocol + */ +out: + dsa_tag_driver_put(old_tag_ops); + return count; +} +static DEVICE_ATTR_RW(tagging); + +static struct attribute *dsa_user_attrs[] = { + &dev_attr_tagging.attr, + NULL +}; + +static const struct attribute_group dsa_group = { + .name = "dsa", + .attrs = dsa_user_attrs, +}; + +static void dsa_conduit_reset_mtu(struct net_device *dev) +{ + int err; + + err = dev_set_mtu(dev, ETH_DATA_LEN); + if (err) + netdev_dbg(dev, + "Unable to reset MTU to exclude DSA overheads\n"); +} + +int dsa_conduit_setup(struct net_device *dev, struct dsa_port *cpu_dp) +{ + const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops; + struct dsa_switch *ds = cpu_dp->ds; + struct device_link *consumer_link; + int mtu, ret; + + mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops); + + /* The DSA conduit must use SET_NETDEV_DEV for this to work. */ + if (!netif_is_lag_master(dev)) { + consumer_link = device_link_add(ds->dev, dev->dev.parent, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!consumer_link) + netdev_err(dev, + "Failed to create a device link to DSA switch %s\n", + dev_name(ds->dev)); + } + + /* The switch driver may not implement ->port_change_mtu(), case in + * which dsa_user_change_mtu() will not update the conduit MTU either, + * so we need to do that here. + */ + ret = dev_set_mtu(dev, mtu); + if (ret) + netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n", + ret, mtu); + + /* If we use a tagging format that doesn't have an ethertype + * field, make sure that all packets from this point on get + * sent to the tag format's receive function. 
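
The dsa_ptr assignment published after the barrier is what flips RX steering: the core checks it on every frame received on the conduit. A simplified sketch of that check, modeled on the real netdev_uses_dsa() helper (the in-tree version is additionally guarded by CONFIG_NET_DSA):

    static inline bool netdev_uses_dsa(const struct net_device *dev)
    {
        /* True once dsa_conduit_setup() has published dsa_ptr, at
         * which point frames are handed to the tagger's ->rcv().
         */
        return dev->dsa_ptr && dev->dsa_ptr->rcv;
    }
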
+ */ + wmb(); + + dev->dsa_ptr = cpu_dp; + + dsa_conduit_set_promiscuity(dev, 1); + + ret = dsa_conduit_ethtool_setup(dev); + if (ret) + goto out_err_reset_promisc; + + ret = sysfs_create_group(&dev->dev.kobj, &dsa_group); + if (ret) + goto out_err_ethtool_teardown; + + return ret; + +out_err_ethtool_teardown: + dsa_conduit_ethtool_teardown(dev); +out_err_reset_promisc: + dsa_conduit_set_promiscuity(dev, -1); + return ret; +} + +void dsa_conduit_teardown(struct net_device *dev) +{ + sysfs_remove_group(&dev->dev.kobj, &dsa_group); + dsa_conduit_ethtool_teardown(dev); + dsa_conduit_reset_mtu(dev); + dsa_conduit_set_promiscuity(dev, -1); + + dev->dsa_ptr = NULL; + + /* If we used a tagging format that doesn't have an ethertype + * field, make sure that all packets from this point get sent + * without the tag and go through the regular receive path. + */ + wmb(); +} + +int dsa_conduit_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp, + struct netdev_lag_upper_info *uinfo, + struct netlink_ext_ack *extack) +{ + bool conduit_setup = false; + int err; + + if (!netdev_uses_dsa(lag_dev)) { + err = dsa_conduit_setup(lag_dev, cpu_dp); + if (err) + return err; + + conduit_setup = true; + } + + err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack); + if (err) { + NL_SET_ERR_MSG_WEAK_MOD(extack, "CPU port failed to join LAG"); + goto out_conduit_teardown; + } + + return 0; + +out_conduit_teardown: + if (conduit_setup) + dsa_conduit_teardown(lag_dev); + return err; +} + +/* Tear down a conduit if there isn't any other user port on it, + * optionally also destroying LAG information. + */ +void dsa_conduit_lag_teardown(struct net_device *lag_dev, + struct dsa_port *cpu_dp) +{ + struct net_device *upper; + struct list_head *iter; + + dsa_port_lag_leave(cpu_dp, lag_dev); + + netdev_for_each_upper_dev_rcu(lag_dev, upper, iter) + if (dsa_user_dev_check(upper)) + return; + + dsa_conduit_teardown(lag_dev); +} diff --git a/net/dsa/conduit.h b/net/dsa/conduit.h new file mode 100644 index 000000000000..31f8834f54bb --- /dev/null +++ b/net/dsa/conduit.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef __DSA_CONDUIT_H +#define __DSA_CONDUIT_H + +struct dsa_port; +struct net_device; +struct netdev_lag_upper_info; +struct netlink_ext_ack; + +int dsa_conduit_setup(struct net_device *dev, struct dsa_port *cpu_dp); +void dsa_conduit_teardown(struct net_device *dev); +int dsa_conduit_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp, + struct netdev_lag_upper_info *uinfo, + struct netlink_ext_ack *extack); +void dsa_conduit_lag_teardown(struct net_device *lag_dev, + struct dsa_port *cpu_dp); +int __dsa_conduit_hwtstamp_validate(struct net_device *dev, + const struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack); + +#endif diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index ccbdb98109f8..ac7be864e80d 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -20,14 +20,14 @@ #include #include +#include "conduit.h" #include "devlink.h" #include "dsa.h" -#include "master.h" #include "netlink.h" #include "port.h" -#include "slave.h" #include "switch.h" #include "tag.h" +#include "user.h" #define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG @@ -365,18 +365,18 @@ static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst) return NULL; } -struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst) +struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst) { struct device_node *ethernet; - struct net_device *master; + 
struct net_device *conduit; struct dsa_port *cpu_dp; cpu_dp = dsa_tree_find_first_cpu(dst); ethernet = of_parse_phandle(cpu_dp->dn, "ethernet", 0); - master = of_find_net_device_by_node(ethernet); + conduit = of_find_net_device_by_node(ethernet); of_node_put(ethernet); - return master; + return conduit; } /* Assign the default CPU port (the first one in the tree) to all ports of the @@ -517,7 +517,7 @@ static int dsa_port_setup(struct dsa_port *dp) break; case DSA_PORT_TYPE_USER: of_get_mac_address(dp->dn, dp->mac); - err = dsa_slave_create(dp); + err = dsa_user_create(dp); break; } @@ -554,9 +554,9 @@ static void dsa_port_teardown(struct dsa_port *dp) dsa_shared_port_link_unregister_of(dp); break; case DSA_PORT_TYPE_USER: - if (dp->slave) { - dsa_slave_destroy(dp->slave); - dp->slave = NULL; + if (dp->user) { + dsa_user_destroy(dp->user); + dp->user = NULL; } break; } @@ -632,9 +632,9 @@ static int dsa_switch_setup(struct dsa_switch *ds) if (ds->setup) return 0; - /* Initialize ds->phys_mii_mask before registering the slave MDIO bus + /* Initialize ds->phys_mii_mask before registering the user MDIO bus * driver and before ops->setup() has run, since the switch drivers and - * the slave MDIO bus driver rely on these values for probing PHY + * the user MDIO bus driver rely on these values for probing PHY * devices or not */ ds->phys_mii_mask |= dsa_user_ports(ds); @@ -657,21 +657,21 @@ static int dsa_switch_setup(struct dsa_switch *ds) if (err) goto teardown; - if (!ds->slave_mii_bus && ds->ops->phy_read) { - ds->slave_mii_bus = mdiobus_alloc(); - if (!ds->slave_mii_bus) { + if (!ds->user_mii_bus && ds->ops->phy_read) { + ds->user_mii_bus = mdiobus_alloc(); + if (!ds->user_mii_bus) { err = -ENOMEM; goto teardown; } - dsa_slave_mii_bus_init(ds); + dsa_user_mii_bus_init(ds); dn = of_get_child_by_name(ds->dev->of_node, "mdio"); - err = of_mdiobus_register(ds->slave_mii_bus, dn); + err = of_mdiobus_register(ds->user_mii_bus, dn); of_node_put(dn); if (err < 0) - goto free_slave_mii_bus; + goto free_user_mii_bus; } dsa_switch_devlink_register(ds); @@ -679,9 +679,9 @@ static int dsa_switch_setup(struct dsa_switch *ds) ds->setup = true; return 0; -free_slave_mii_bus: - if (ds->slave_mii_bus && ds->ops->phy_read) - mdiobus_free(ds->slave_mii_bus); +free_user_mii_bus: + if (ds->user_mii_bus && ds->ops->phy_read) + mdiobus_free(ds->user_mii_bus); teardown: if (ds->ops->teardown) ds->ops->teardown(ds); @@ -699,10 +699,10 @@ static void dsa_switch_teardown(struct dsa_switch *ds) dsa_switch_devlink_unregister(ds); - if (ds->slave_mii_bus && ds->ops->phy_read) { - mdiobus_unregister(ds->slave_mii_bus); - mdiobus_free(ds->slave_mii_bus); - ds->slave_mii_bus = NULL; + if (ds->user_mii_bus && ds->ops->phy_read) { + mdiobus_unregister(ds->user_mii_bus); + mdiobus_free(ds->user_mii_bus); + ds->user_mii_bus = NULL; } dsa_switch_teardown_tag_protocol(ds); @@ -793,7 +793,7 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst) return err; } -static int dsa_tree_setup_master(struct dsa_switch_tree *dst) +static int dsa_tree_setup_conduit(struct dsa_switch_tree *dst) { struct dsa_port *cpu_dp; int err = 0; @@ -801,18 +801,18 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst) rtnl_lock(); dsa_tree_for_each_cpu_port(cpu_dp, dst) { - struct net_device *master = cpu_dp->master; - bool admin_up = (master->flags & IFF_UP) && - !qdisc_tx_is_noop(master); + struct net_device *conduit = cpu_dp->conduit; + bool admin_up = (conduit->flags & IFF_UP) && + !qdisc_tx_is_noop(conduit); - err = 
dsa_master_setup(master, cpu_dp); + err = dsa_conduit_setup(conduit, cpu_dp); if (err) break; - /* Replay master state event */ - dsa_tree_master_admin_state_change(dst, master, admin_up); - dsa_tree_master_oper_state_change(dst, master, - netif_oper_up(master)); + /* Replay conduit state event */ + dsa_tree_conduit_admin_state_change(dst, conduit, admin_up); + dsa_tree_conduit_oper_state_change(dst, conduit, + netif_oper_up(conduit)); } rtnl_unlock(); @@ -820,22 +820,22 @@ static int dsa_tree_setup_master(struct dsa_switch_tree *dst) return err; } -static void dsa_tree_teardown_master(struct dsa_switch_tree *dst) +static void dsa_tree_teardown_conduit(struct dsa_switch_tree *dst) { struct dsa_port *cpu_dp; rtnl_lock(); dsa_tree_for_each_cpu_port(cpu_dp, dst) { - struct net_device *master = cpu_dp->master; + struct net_device *conduit = cpu_dp->conduit; /* Synthesizing an "admin down" state is sufficient for - * the switches to get a notification if the master is + * the switches to get a notification if the conduit is * currently up and running. */ - dsa_tree_master_admin_state_change(dst, master, false); + dsa_tree_conduit_admin_state_change(dst, conduit, false); - dsa_master_teardown(master); + dsa_conduit_teardown(conduit); } rtnl_unlock(); @@ -894,13 +894,13 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst) if (err) goto teardown_switches; - err = dsa_tree_setup_master(dst); + err = dsa_tree_setup_conduit(dst); if (err) goto teardown_ports; err = dsa_tree_setup_lags(dst); if (err) - goto teardown_master; + goto teardown_conduit; dst->setup = true; @@ -908,8 +908,8 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst) return 0; -teardown_master: - dsa_tree_teardown_master(dst); +teardown_conduit: + dsa_tree_teardown_conduit(dst); teardown_ports: dsa_tree_teardown_ports(dst); teardown_switches: @@ -929,7 +929,7 @@ static void dsa_tree_teardown(struct dsa_switch_tree *dst) dsa_tree_teardown_lags(dst); - dsa_tree_teardown_master(dst); + dsa_tree_teardown_conduit(dst); dsa_tree_teardown_ports(dst); @@ -978,7 +978,7 @@ out_disconnect: return err; } -/* Since the dsa/tagging sysfs device attribute is per master, the assumption +/* Since the dsa/tagging sysfs device attribute is per conduit, the assumption * is that all DSA switches within a tree share the same tagger, otherwise * they would have formed disjoint trees (different "dsa,member" values). */ @@ -999,10 +999,10 @@ int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, * restriction, there needs to be another mutex which serializes this. 
*/ dsa_tree_for_each_user_port(dp, dst) { - if (dsa_port_to_master(dp)->flags & IFF_UP) + if (dsa_port_to_conduit(dp)->flags & IFF_UP) goto out_unlock; - if (dp->slave->flags & IFF_UP) + if (dp->user->flags & IFF_UP) goto out_unlock; } @@ -1028,62 +1028,62 @@ out_unlock: return err; } -static void dsa_tree_master_state_change(struct dsa_switch_tree *dst, - struct net_device *master) +static void dsa_tree_conduit_state_change(struct dsa_switch_tree *dst, + struct net_device *conduit) { - struct dsa_notifier_master_state_info info; - struct dsa_port *cpu_dp = master->dsa_ptr; + struct dsa_notifier_conduit_state_info info; + struct dsa_port *cpu_dp = conduit->dsa_ptr; - info.master = master; - info.operational = dsa_port_master_is_operational(cpu_dp); + info.conduit = conduit; + info.operational = dsa_port_conduit_is_operational(cpu_dp); - dsa_tree_notify(dst, DSA_NOTIFIER_MASTER_STATE_CHANGE, &info); + dsa_tree_notify(dst, DSA_NOTIFIER_CONDUIT_STATE_CHANGE, &info); } -void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst, - struct net_device *master, - bool up) +void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst, + struct net_device *conduit, + bool up) { - struct dsa_port *cpu_dp = master->dsa_ptr; + struct dsa_port *cpu_dp = conduit->dsa_ptr; bool notify = false; - /* Don't keep track of admin state on LAG DSA masters, - * but rather just of physical DSA masters + /* Don't keep track of admin state on LAG DSA conduits, + * but rather just of physical DSA conduits */ - if (netif_is_lag_master(master)) + if (netif_is_lag_master(conduit)) return; - if ((dsa_port_master_is_operational(cpu_dp)) != - (up && cpu_dp->master_oper_up)) + if ((dsa_port_conduit_is_operational(cpu_dp)) != + (up && cpu_dp->conduit_oper_up)) notify = true; - cpu_dp->master_admin_up = up; + cpu_dp->conduit_admin_up = up; if (notify) - dsa_tree_master_state_change(dst, master); + dsa_tree_conduit_state_change(dst, conduit); } -void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst, - struct net_device *master, - bool up) +void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst, + struct net_device *conduit, + bool up) { - struct dsa_port *cpu_dp = master->dsa_ptr; + struct dsa_port *cpu_dp = conduit->dsa_ptr; bool notify = false; - /* Don't keep track of oper state on LAG DSA masters, - * but rather just of physical DSA masters + /* Don't keep track of oper state on LAG DSA conduits, + * but rather just of physical DSA conduits */ - if (netif_is_lag_master(master)) + if (netif_is_lag_master(conduit)) return; - if ((dsa_port_master_is_operational(cpu_dp)) != - (cpu_dp->master_admin_up && up)) + if ((dsa_port_conduit_is_operational(cpu_dp)) != + (cpu_dp->conduit_admin_up && up)) notify = true; - cpu_dp->master_oper_up = up; + cpu_dp->conduit_oper_up = up; if (notify) - dsa_tree_master_state_change(dst, master); + dsa_tree_conduit_state_change(dst, conduit); } static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index) @@ -1129,7 +1129,7 @@ static int dsa_port_parse_dsa(struct dsa_port *dp) } static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp, - struct net_device *master) + struct net_device *conduit) { enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE; struct dsa_switch *mds, *ds = dp->ds; @@ -1140,21 +1140,21 @@ static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp, * happens the switch driver may want to know if its tagging protocol * is going to work in such a configuration. 
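
That query reaches the switch driver through its get_tag_protocol hook, whose third argument carries the upstream tree's tagger (or DSA_TAG_PROTO_NONE when the conduit is an ordinary Ethernet device). A minimal sketch of a hypothetical implementation; the driver name and the protocol choices are illustrative:

    static enum dsa_tag_protocol
    example_get_tag_protocol(struct dsa_switch *ds, int port,
                             enum dsa_tag_protocol mprot)
    {
        /* Prefer the short DSA tag when stacked under another
         * tagger, the EtherType-bearing EDSA tag otherwise.
         */
        if (mprot != DSA_TAG_PROTO_NONE)
            return DSA_TAG_PROTO_DSA;

        return DSA_TAG_PROTO_EDSA;
    }
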
*/ - if (dsa_slave_dev_check(master)) { - mdp = dsa_slave_to_port(master); + if (dsa_user_dev_check(conduit)) { + mdp = dsa_user_to_port(conduit); mds = mdp->ds; mdp_upstream = dsa_upstream_port(mds, mdp->index); tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream, DSA_TAG_PROTO_NONE); } - /* If the master device is not itself a DSA slave in a disjoint DSA + /* If the conduit device is not itself a DSA user in a disjoint DSA * tree, then return immediately. */ return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol); } -static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master, +static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *conduit, const char *user_protocol) { const struct dsa_device_ops *tag_ops = NULL; @@ -1163,7 +1163,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master, enum dsa_tag_protocol default_proto; /* Find out which protocol the switch would prefer. */ - default_proto = dsa_get_tag_protocol(dp, master); + default_proto = dsa_get_tag_protocol(dp, conduit); if (dst->default_proto) { if (dst->default_proto != default_proto) { dev_err(ds->dev, @@ -1218,7 +1218,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master, dst->tag_ops = tag_ops; } - dp->master = master; + dp->conduit = conduit; dp->type = DSA_PORT_TYPE_CPU; dsa_port_set_tag_protocol(dp, dst->tag_ops); dp->dst = dst; @@ -1248,16 +1248,16 @@ static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn) dp->dn = dn; if (ethernet) { - struct net_device *master; + struct net_device *conduit; const char *user_protocol; - master = of_find_net_device_by_node(ethernet); + conduit = of_find_net_device_by_node(ethernet); of_node_put(ethernet); - if (!master) + if (!conduit) return -EPROBE_DEFER; user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL); - return dsa_port_parse_cpu(dp, master, user_protocol); + return dsa_port_parse_cpu(dp, conduit, user_protocol); } if (link) @@ -1412,15 +1412,15 @@ static int dsa_port_parse(struct dsa_port *dp, const char *name, struct device *dev) { if (!strcmp(name, "cpu")) { - struct net_device *master; + struct net_device *conduit; - master = dsa_dev_to_net_device(dev); - if (!master) + conduit = dsa_dev_to_net_device(dev); + if (!conduit) return -EPROBE_DEFER; - dev_put(master); + dev_put(conduit); - return dsa_port_parse_cpu(dp, master, NULL); + return dsa_port_parse_cpu(dp, conduit, NULL); } if (!strcmp(name, "dsa")) @@ -1566,14 +1566,14 @@ void dsa_unregister_switch(struct dsa_switch *ds) } EXPORT_SYMBOL_GPL(dsa_unregister_switch); -/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is +/* If the DSA conduit chooses to unregister its net_device on .shutdown, DSA is * blocking that operation from completion, due to the dev_hold taken inside - * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of - * the DSA master, so that the system can reboot successfully. + * netdev_upper_dev_link. Unlink the DSA user interfaces from being uppers of + * the DSA conduit, so that the system can reboot successfully. 
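
Switch drivers are expected to call dsa_switch_shutdown() from their bus ->shutdown() hook. A minimal sketch for a hypothetical MDIO-attached switch; the names are illustrative, and clearing drvdata so a later ->remove() becomes a no-op mirrors the pattern used by in-tree drivers:

    struct example_priv {
        struct dsa_switch *ds;
    };

    static void example_mdio_shutdown(struct mdio_device *mdiodev)
    {
        struct example_priv *priv = dev_get_drvdata(&mdiodev->dev);

        if (!priv)
            return;

        /* Unlink user ports from the conduit and stop reacting to
         * netdevice notifiers before the conduit unregisters.
         */
        dsa_switch_shutdown(priv->ds);

        dev_set_drvdata(&mdiodev->dev, NULL);
    }
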
*/ void dsa_switch_shutdown(struct dsa_switch *ds) { - struct net_device *master, *slave_dev; + struct net_device *conduit, *user_dev; struct dsa_port *dp; mutex_lock(&dsa2_mutex); @@ -1584,17 +1584,17 @@ void dsa_switch_shutdown(struct dsa_switch *ds) rtnl_lock(); dsa_switch_for_each_user_port(dp, ds) { - master = dsa_port_to_master(dp); - slave_dev = dp->slave; + conduit = dsa_port_to_conduit(dp); + user_dev = dp->user; - netdev_upper_dev_unlink(master, slave_dev); + netdev_upper_dev_unlink(conduit, user_dev); } - /* Disconnect from further netdevice notifiers on the master, + /* Disconnect from further netdevice notifiers on the conduit, * since netdev_uses_dsa() will now return false. */ dsa_switch_for_each_cpu_port(dp, ds) - dp->master->dsa_ptr = NULL; + dp->conduit->dsa_ptr = NULL; rtnl_unlock(); out: @@ -1605,7 +1605,7 @@ EXPORT_SYMBOL_GPL(dsa_switch_shutdown); #ifdef CONFIG_PM_SLEEP static bool dsa_port_is_initialized(const struct dsa_port *dp) { - return dp->type == DSA_PORT_TYPE_USER && dp->slave; + return dp->type == DSA_PORT_TYPE_USER && dp->user; } int dsa_switch_suspend(struct dsa_switch *ds) @@ -1613,12 +1613,12 @@ int dsa_switch_suspend(struct dsa_switch *ds) struct dsa_port *dp; int ret = 0; - /* Suspend slave network devices */ + /* Suspend user network devices */ dsa_switch_for_each_port(dp, ds) { if (!dsa_port_is_initialized(dp)) continue; - ret = dsa_slave_suspend(dp->slave); + ret = dsa_user_suspend(dp->user); if (ret) return ret; } @@ -1641,12 +1641,12 @@ int dsa_switch_resume(struct dsa_switch *ds) if (ret) return ret; - /* Resume slave network devices */ + /* Resume user network devices */ dsa_switch_for_each_port(dp, ds) { if (!dsa_port_is_initialized(dp)) continue; - ret = dsa_slave_resume(dp->slave); + ret = dsa_user_resume(dp->user); if (ret) return ret; } @@ -1658,10 +1658,10 @@ EXPORT_SYMBOL_GPL(dsa_switch_resume); struct dsa_port *dsa_port_from_netdev(struct net_device *netdev) { - if (!netdev || !dsa_slave_dev_check(netdev)) + if (!netdev || !dsa_user_dev_check(netdev)) return ERR_PTR(-ENODEV); - return dsa_slave_to_port(netdev); + return dsa_user_to_port(netdev); } EXPORT_SYMBOL_GPL(dsa_port_from_netdev); @@ -1726,7 +1726,7 @@ bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port, EXPORT_SYMBOL_GPL(dsa_mdb_present_in_other_db); static const struct dsa_stubs __dsa_stubs = { - .master_hwtstamp_validate = __dsa_master_hwtstamp_validate, + .conduit_hwtstamp_validate = __dsa_conduit_hwtstamp_validate, }; static void dsa_register_stubs(void) @@ -1748,7 +1748,7 @@ static int __init dsa_init_module(void) if (!dsa_owq) return -ENOMEM; - rc = dsa_slave_register_notifier(); + rc = dsa_user_register_notifier(); if (rc) goto register_notifier_fail; @@ -1763,7 +1763,7 @@ static int __init dsa_init_module(void) return 0; netlink_register_fail: - dsa_slave_unregister_notifier(); + dsa_user_unregister_notifier(); dev_remove_pack(&dsa_pack_type); register_notifier_fail: destroy_workqueue(dsa_owq); @@ -1778,7 +1778,7 @@ static void __exit dsa_cleanup_module(void) rtnl_link_unregister(&dsa_link_ops); - dsa_slave_unregister_notifier(); + dsa_user_unregister_notifier(); dev_remove_pack(&dsa_pack_type); destroy_workqueue(dsa_owq); } diff --git a/net/dsa/dsa.h b/net/dsa/dsa.h index b7e17ae1094d..3cc7823e9ef3 100644 --- a/net/dsa/dsa.h +++ b/net/dsa/dsa.h @@ -21,16 +21,16 @@ void dsa_lag_map(struct dsa_switch_tree *dst, struct dsa_lag *lag); void dsa_lag_unmap(struct dsa_switch_tree *dst, struct dsa_lag *lag); struct dsa_lag *dsa_tree_lag_find(struct dsa_switch_tree 
*dst, const struct net_device *lag_dev); -struct net_device *dsa_tree_find_first_master(struct dsa_switch_tree *dst); +struct net_device *dsa_tree_find_first_conduit(struct dsa_switch_tree *dst); int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst, const struct dsa_device_ops *tag_ops, const struct dsa_device_ops *old_tag_ops); -void dsa_tree_master_admin_state_change(struct dsa_switch_tree *dst, - struct net_device *master, +void dsa_tree_conduit_admin_state_change(struct dsa_switch_tree *dst, + struct net_device *conduit, + bool up); +void dsa_tree_conduit_oper_state_change(struct dsa_switch_tree *dst, + struct net_device *conduit, bool up); -void dsa_tree_master_oper_state_change(struct dsa_switch_tree *dst, - struct net_device *master, - bool up); unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max); void dsa_bridge_num_put(const struct net_device *bridge_dev, unsigned int bridge_num); diff --git a/net/dsa/master.c b/net/dsa/master.c deleted file mode 100644 index 6be89ab0cc01..000000000000 --- a/net/dsa/master.c +++ /dev/null @@ -1,475 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * Handling of a master device, switching frames via its switch fabric CPU port - * - * Copyright (c) 2017 Savoir-faire Linux Inc. - * Vivien Didelot - */ - -#include -#include -#include -#include - -#include "dsa.h" -#include "master.h" -#include "port.h" -#include "tag.h" - -static int dsa_master_get_regs_len(struct net_device *dev) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; - struct dsa_switch *ds = cpu_dp->ds; - int port = cpu_dp->index; - int ret = 0; - int len; - - if (ops->get_regs_len) { - len = ops->get_regs_len(dev); - if (len < 0) - return len; - ret += len; - } - - ret += sizeof(struct ethtool_drvinfo); - ret += sizeof(struct ethtool_regs); - - if (ds->ops->get_regs_len) { - len = ds->ops->get_regs_len(ds, port); - if (len < 0) - return len; - ret += len; - } - - return ret; -} - -static void dsa_master_get_regs(struct net_device *dev, - struct ethtool_regs *regs, void *data) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; - struct dsa_switch *ds = cpu_dp->ds; - struct ethtool_drvinfo *cpu_info; - struct ethtool_regs *cpu_regs; - int port = cpu_dp->index; - int len; - - if (ops->get_regs_len && ops->get_regs) { - len = ops->get_regs_len(dev); - if (len < 0) - return; - regs->len = len; - ops->get_regs(dev, regs, data); - data += regs->len; - } - - cpu_info = (struct ethtool_drvinfo *)data; - strscpy(cpu_info->driver, "dsa", sizeof(cpu_info->driver)); - data += sizeof(*cpu_info); - cpu_regs = (struct ethtool_regs *)data; - data += sizeof(*cpu_regs); - - if (ds->ops->get_regs_len && ds->ops->get_regs) { - len = ds->ops->get_regs_len(ds, port); - if (len < 0) - return; - cpu_regs->len = len; - ds->ops->get_regs(ds, port, cpu_regs, data); - } -} - -static void dsa_master_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, - uint64_t *data) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; - struct dsa_switch *ds = cpu_dp->ds; - int port = cpu_dp->index; - int count = 0; - - if (ops->get_sset_count && ops->get_ethtool_stats) { - count = ops->get_sset_count(dev, ETH_SS_STATS); - ops->get_ethtool_stats(dev, stats, data); - } - - if (ds->ops->get_ethtool_stats) - ds->ops->get_ethtool_stats(ds, port, data + count); -} - -static void 
dsa_master_get_ethtool_phy_stats(struct net_device *dev, - struct ethtool_stats *stats, - uint64_t *data) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; - struct dsa_switch *ds = cpu_dp->ds; - int port = cpu_dp->index; - int count = 0; - - if (dev->phydev && !ops->get_ethtool_phy_stats) { - count = phy_ethtool_get_sset_count(dev->phydev); - if (count >= 0) - phy_ethtool_get_stats(dev->phydev, stats, data); - } else if (ops->get_sset_count && ops->get_ethtool_phy_stats) { - count = ops->get_sset_count(dev, ETH_SS_PHY_STATS); - ops->get_ethtool_phy_stats(dev, stats, data); - } - - if (count < 0) - count = 0; - - if (ds->ops->get_ethtool_phy_stats) - ds->ops->get_ethtool_phy_stats(ds, port, data + count); -} - -static int dsa_master_get_sset_count(struct net_device *dev, int sset) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; - struct dsa_switch *ds = cpu_dp->ds; - int count = 0; - - if (sset == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) - count = phy_ethtool_get_sset_count(dev->phydev); - else if (ops->get_sset_count) - count = ops->get_sset_count(dev, sset); - - if (count < 0) - count = 0; - - if (ds->ops->get_sset_count) - count += ds->ops->get_sset_count(ds, cpu_dp->index, sset); - - return count; -} - -static void dsa_master_get_strings(struct net_device *dev, uint32_t stringset, - uint8_t *data) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - const struct ethtool_ops *ops = cpu_dp->orig_ethtool_ops; - struct dsa_switch *ds = cpu_dp->ds; - int port = cpu_dp->index; - int len = ETH_GSTRING_LEN; - int mcount = 0, count, i; - uint8_t pfx[4]; - uint8_t *ndata; - - snprintf(pfx, sizeof(pfx), "p%.2d", port); - /* We do not want to be NULL-terminated, since this is a prefix */ - pfx[sizeof(pfx) - 1] = '_'; - - if (stringset == ETH_SS_PHY_STATS && dev->phydev && - !ops->get_ethtool_phy_stats) { - mcount = phy_ethtool_get_sset_count(dev->phydev); - if (mcount < 0) - mcount = 0; - else - phy_ethtool_get_strings(dev->phydev, data); - } else if (ops->get_sset_count && ops->get_strings) { - mcount = ops->get_sset_count(dev, stringset); - if (mcount < 0) - mcount = 0; - ops->get_strings(dev, stringset, data); - } - - if (ds->ops->get_strings) { - ndata = data + mcount * len; - /* This function copies ETH_GSTRINGS_LEN bytes, we will mangle - * the output after to prepend our CPU port prefix we - * constructed earlier - */ - ds->ops->get_strings(ds, port, stringset, ndata); - count = ds->ops->get_sset_count(ds, port, stringset); - if (count < 0) - return; - for (i = 0; i < count; i++) { - memmove(ndata + (i * len + sizeof(pfx)), - ndata + i * len, len - sizeof(pfx)); - memcpy(ndata + i * len, pfx, sizeof(pfx)); - } - } -} - -/* Deny PTP operations on master if there is at least one switch in the tree - * that is PTP capable. 
- */ -int __dsa_master_hwtstamp_validate(struct net_device *dev, - const struct kernel_hwtstamp_config *config, - struct netlink_ext_ack *extack) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - struct dsa_switch *ds = cpu_dp->ds; - struct dsa_switch_tree *dst; - struct dsa_port *dp; - - dst = ds->dst; - - list_for_each_entry(dp, &dst->ports, list) { - if (dsa_port_supports_hwtstamp(dp)) { - NL_SET_ERR_MSG(extack, - "HW timestamping not allowed on DSA master when switch supports the operation"); - return -EBUSY; - } - } - - return 0; -} - -static int dsa_master_ethtool_setup(struct net_device *dev) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - struct dsa_switch *ds = cpu_dp->ds; - struct ethtool_ops *ops; - - if (netif_is_lag_master(dev)) - return 0; - - ops = devm_kzalloc(ds->dev, sizeof(*ops), GFP_KERNEL); - if (!ops) - return -ENOMEM; - - cpu_dp->orig_ethtool_ops = dev->ethtool_ops; - if (cpu_dp->orig_ethtool_ops) - memcpy(ops, cpu_dp->orig_ethtool_ops, sizeof(*ops)); - - ops->get_regs_len = dsa_master_get_regs_len; - ops->get_regs = dsa_master_get_regs; - ops->get_sset_count = dsa_master_get_sset_count; - ops->get_ethtool_stats = dsa_master_get_ethtool_stats; - ops->get_strings = dsa_master_get_strings; - ops->get_ethtool_phy_stats = dsa_master_get_ethtool_phy_stats; - - dev->ethtool_ops = ops; - - return 0; -} - -static void dsa_master_ethtool_teardown(struct net_device *dev) -{ - struct dsa_port *cpu_dp = dev->dsa_ptr; - - if (netif_is_lag_master(dev)) - return; - - dev->ethtool_ops = cpu_dp->orig_ethtool_ops; - cpu_dp->orig_ethtool_ops = NULL; -} - -/* Keep the master always promiscuous if the tagging protocol requires that - * (garbles MAC DA) or if it doesn't support unicast filtering, case in which - * it would revert to promiscuous mode as soon as we call dev_uc_add() on it - * anyway. - */ -static void dsa_master_set_promiscuity(struct net_device *dev, int inc) -{ - const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops; - - if ((dev->priv_flags & IFF_UNICAST_FLT) && !ops->promisc_on_master) - return; - - ASSERT_RTNL(); - - dev_set_promiscuity(dev, inc); -} - -static ssize_t tagging_show(struct device *d, struct device_attribute *attr, - char *buf) -{ - struct net_device *dev = to_net_dev(d); - struct dsa_port *cpu_dp = dev->dsa_ptr; - - return sysfs_emit(buf, "%s\n", - dsa_tag_protocol_to_str(cpu_dp->tag_ops)); -} - -static ssize_t tagging_store(struct device *d, struct device_attribute *attr, - const char *buf, size_t count) -{ - const struct dsa_device_ops *new_tag_ops, *old_tag_ops; - const char *end = strchrnul(buf, '\n'), *name; - struct net_device *dev = to_net_dev(d); - struct dsa_port *cpu_dp = dev->dsa_ptr; - size_t len = end - buf; - int err; - - /* Empty string passed */ - if (!len) - return -ENOPROTOOPT; - - name = kstrndup(buf, len, GFP_KERNEL); - if (!name) - return -ENOMEM; - - old_tag_ops = cpu_dp->tag_ops; - new_tag_ops = dsa_tag_driver_get_by_name(name); - kfree(name); - /* Bad tagger name? */ - if (IS_ERR(new_tag_ops)) - return PTR_ERR(new_tag_ops); - - if (new_tag_ops == old_tag_ops) - /* Drop the temporarily held duplicate reference, since - * the DSA switch tree uses this tagger. - */ - goto out; - - err = dsa_tree_change_tag_proto(cpu_dp->ds->dst, new_tag_ops, - old_tag_ops); - if (err) { - /* On failure the old tagger is restored, so we don't need the - * driver for the new one. 
- */ - dsa_tag_driver_put(new_tag_ops); - return err; - } - - /* On success we no longer need the module for the old tagging protocol - */ -out: - dsa_tag_driver_put(old_tag_ops); - return count; -} -static DEVICE_ATTR_RW(tagging); - -static struct attribute *dsa_slave_attrs[] = { - &dev_attr_tagging.attr, - NULL -}; - -static const struct attribute_group dsa_group = { - .name = "dsa", - .attrs = dsa_slave_attrs, -}; - -static void dsa_master_reset_mtu(struct net_device *dev) -{ - int err; - - err = dev_set_mtu(dev, ETH_DATA_LEN); - if (err) - netdev_dbg(dev, - "Unable to reset MTU to exclude DSA overheads\n"); -} - -int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) -{ - const struct dsa_device_ops *tag_ops = cpu_dp->tag_ops; - struct dsa_switch *ds = cpu_dp->ds; - struct device_link *consumer_link; - int mtu, ret; - - mtu = ETH_DATA_LEN + dsa_tag_protocol_overhead(tag_ops); - - /* The DSA master must use SET_NETDEV_DEV for this to work. */ - if (!netif_is_lag_master(dev)) { - consumer_link = device_link_add(ds->dev, dev->dev.parent, - DL_FLAG_AUTOREMOVE_CONSUMER); - if (!consumer_link) - netdev_err(dev, - "Failed to create a device link to DSA switch %s\n", - dev_name(ds->dev)); - } - - /* The switch driver may not implement ->port_change_mtu(), case in - * which dsa_slave_change_mtu() will not update the master MTU either, - * so we need to do that here. - */ - ret = dev_set_mtu(dev, mtu); - if (ret) - netdev_warn(dev, "error %d setting MTU to %d to include DSA overhead\n", - ret, mtu); - - /* If we use a tagging format that doesn't have an ethertype - * field, make sure that all packets from this point on get - * sent to the tag format's receive function. - */ - wmb(); - - dev->dsa_ptr = cpu_dp; - - dsa_master_set_promiscuity(dev, 1); - - ret = dsa_master_ethtool_setup(dev); - if (ret) - goto out_err_reset_promisc; - - ret = sysfs_create_group(&dev->dev.kobj, &dsa_group); - if (ret) - goto out_err_ethtool_teardown; - - return ret; - -out_err_ethtool_teardown: - dsa_master_ethtool_teardown(dev); -out_err_reset_promisc: - dsa_master_set_promiscuity(dev, -1); - return ret; -} - -void dsa_master_teardown(struct net_device *dev) -{ - sysfs_remove_group(&dev->dev.kobj, &dsa_group); - dsa_master_ethtool_teardown(dev); - dsa_master_reset_mtu(dev); - dsa_master_set_promiscuity(dev, -1); - - dev->dsa_ptr = NULL; - - /* If we used a tagging format that doesn't have an ethertype - * field, make sure that all packets from this point get sent - * without the tag and go through the regular receive path. - */ - wmb(); -} - -int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp, - struct netdev_lag_upper_info *uinfo, - struct netlink_ext_ack *extack) -{ - bool master_setup = false; - int err; - - if (!netdev_uses_dsa(lag_dev)) { - err = dsa_master_setup(lag_dev, cpu_dp); - if (err) - return err; - - master_setup = true; - } - - err = dsa_port_lag_join(cpu_dp, lag_dev, uinfo, extack); - if (err) { - NL_SET_ERR_MSG_WEAK_MOD(extack, "CPU port failed to join LAG"); - goto out_master_teardown; - } - - return 0; - -out_master_teardown: - if (master_setup) - dsa_master_teardown(lag_dev); - return err; -} - -/* Tear down a master if there isn't any other user port on it, - * optionally also destroying LAG information. 
- */ -void dsa_master_lag_teardown(struct net_device *lag_dev, - struct dsa_port *cpu_dp) -{ - struct net_device *upper; - struct list_head *iter; - - dsa_port_lag_leave(cpu_dp, lag_dev); - - netdev_for_each_upper_dev_rcu(lag_dev, upper, iter) - if (dsa_slave_dev_check(upper)) - return; - - dsa_master_teardown(lag_dev); -} diff --git a/net/dsa/master.h b/net/dsa/master.h deleted file mode 100644 index 76e39d3ec909..000000000000 --- a/net/dsa/master.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#ifndef __DSA_MASTER_H -#define __DSA_MASTER_H - -struct dsa_port; -struct net_device; -struct netdev_lag_upper_info; -struct netlink_ext_ack; - -int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp); -void dsa_master_teardown(struct net_device *dev); -int dsa_master_lag_setup(struct net_device *lag_dev, struct dsa_port *cpu_dp, - struct netdev_lag_upper_info *uinfo, - struct netlink_ext_ack *extack); -void dsa_master_lag_teardown(struct net_device *lag_dev, - struct dsa_port *cpu_dp); -int __dsa_master_hwtstamp_validate(struct net_device *dev, - const struct kernel_hwtstamp_config *config, - struct netlink_ext_ack *extack); - -#endif diff --git a/net/dsa/netlink.c b/net/dsa/netlink.c index bd4bbaf851de..f56f90a25b99 100644 --- a/net/dsa/netlink.c +++ b/net/dsa/netlink.c @@ -5,7 +5,7 @@ #include #include "netlink.h" -#include "slave.h" +#include "user.h" static const struct nla_policy dsa_policy[IFLA_DSA_MAX + 1] = { [IFLA_DSA_MASTER] = { .type = NLA_U32 }, @@ -22,13 +22,13 @@ static int dsa_changelink(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_DSA_MASTER]) { u32 ifindex = nla_get_u32(data[IFLA_DSA_MASTER]); - struct net_device *master; + struct net_device *conduit; - master = __dev_get_by_index(dev_net(dev), ifindex); - if (!master) + conduit = __dev_get_by_index(dev_net(dev), ifindex); + if (!conduit) return -EINVAL; - err = dsa_slave_change_master(dev, master, extack); + err = dsa_user_change_conduit(dev, conduit, extack); if (err) return err; } @@ -44,9 +44,9 @@ static size_t dsa_get_size(const struct net_device *dev) static int dsa_fill_info(struct sk_buff *skb, const struct net_device *dev) { - struct net_device *master = dsa_slave_to_master(dev); + struct net_device *conduit = dsa_user_to_conduit(dev); - if (nla_put_u32(skb, IFLA_DSA_MASTER, master->ifindex)) + if (nla_put_u32(skb, IFLA_DSA_MASTER, conduit->ifindex)) return -EMSGSIZE; return 0; diff --git a/net/dsa/port.c b/net/dsa/port.c index 6e0d000a97c4..c42dac87671b 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -14,9 +14,9 @@ #include "dsa.h" #include "port.h" -#include "slave.h" #include "switch.h" #include "tag_8021q.h" +#include "user.h" /** * dsa_port_notify - Notify the switching fabric of changes to a port @@ -289,7 +289,7 @@ static void dsa_port_reset_vlan_filtering(struct dsa_port *dp, } /* If the bridge was vlan_filtering, the bridge core doesn't trigger an - * event for changing vlan_filtering setting upon slave ports leaving + * event for changing vlan_filtering setting upon user ports leaving * it. That is a good thing, because that lets us handle it and also * handle the case where the switch's vlan_filtering setting is global * (not per port). 
When that happens, the correct moment to trigger the @@ -489,7 +489,7 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br, .dp = dp, .extack = extack, }; - struct net_device *dev = dp->slave; + struct net_device *dev = dp->user; struct net_device *brport_dev; int err; @@ -514,8 +514,8 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br, dp->bridge->tx_fwd_offload = info.tx_fwd_offload; err = switchdev_bridge_port_offload(brport_dev, dev, dp, - &dsa_slave_switchdev_notifier, - &dsa_slave_switchdev_blocking_notifier, + &dsa_user_switchdev_notifier, + &dsa_user_switchdev_blocking_notifier, dp->bridge->tx_fwd_offload, extack); if (err) goto out_rollback_unbridge; @@ -528,8 +528,8 @@ int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br, out_rollback_unoffload: switchdev_bridge_port_unoffload(brport_dev, dp, - &dsa_slave_switchdev_notifier, - &dsa_slave_switchdev_blocking_notifier); + &dsa_user_switchdev_notifier, + &dsa_user_switchdev_blocking_notifier); dsa_flush_workqueue(); out_rollback_unbridge: dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info); @@ -547,8 +547,8 @@ void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br) return; switchdev_bridge_port_unoffload(brport_dev, dp, - &dsa_slave_switchdev_notifier, - &dsa_slave_switchdev_blocking_notifier); + &dsa_user_switchdev_notifier, + &dsa_user_switchdev_blocking_notifier); dsa_flush_workqueue(); } @@ -741,10 +741,10 @@ static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp, */ if (vlan_filtering && dsa_port_is_user(dp)) { struct net_device *br = dsa_port_bridge_dev_get(dp); - struct net_device *upper_dev, *slave = dp->slave; + struct net_device *upper_dev, *user = dp->user; struct list_head *iter; - netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) { + netdev_for_each_upper_dev_rcu(user, upper_dev, iter) { struct bridge_vlan_info br_info; u16 vid; @@ -803,9 +803,9 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, if (!ds->ops->port_vlan_filtering) return -EOPNOTSUPP; - /* We are called from dsa_slave_switchdev_blocking_event(), + /* We are called from dsa_user_switchdev_blocking_event(), * which is not under rcu_read_lock(), unlike - * dsa_slave_switchdev_event(). + * dsa_user_switchdev_event(). */ rcu_read_lock(); apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering, extack); @@ -827,24 +827,24 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering, ds->vlan_filtering = vlan_filtering; dsa_switch_for_each_user_port(other_dp, ds) { - struct net_device *slave = other_dp->slave; + struct net_device *user = other_dp->user; /* We might be called in the unbind path, so not - * all slave devices might still be registered. + * all user devices might still be registered. 
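
Whether this propagation across all user ports happens at all is declared by the switch driver. A minimal sketch of how a hypothetical driver would announce a chip-wide VLAN filtering state from its ->setup() hook (illustrative):

    static int example_setup(struct dsa_switch *ds)
    {
        /* One vlan_filtering knob for the whole chip: changing it on
         * one port must be replayed on every user port, which is what
         * the surrounding loop in dsa_port_vlan_filtering() does.
         */
        ds->vlan_filtering_is_global = true;

        return 0;
    }
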
*/ - if (!slave) + if (!user) continue; - err = dsa_slave_manage_vlan_filtering(slave, - vlan_filtering); + err = dsa_user_manage_vlan_filtering(user, + vlan_filtering); if (err) goto restore; } } else { dp->vlan_filtering = vlan_filtering; - err = dsa_slave_manage_vlan_filtering(dp->slave, - vlan_filtering); + err = dsa_user_manage_vlan_filtering(dp->user, + vlan_filtering); if (err) goto restore; } @@ -863,7 +863,7 @@ restore: } /* This enforces legacy behavior for switch drivers which assume they can't - * receive VLAN configuration when enslaved to a bridge with vlan_filtering=0 + * receive VLAN configuration when joining a bridge with vlan_filtering=0 */ bool dsa_port_skip_vlan_configuration(struct dsa_port *dp) { @@ -1047,7 +1047,7 @@ int dsa_port_standalone_host_fdb_add(struct dsa_port *dp, int dsa_port_bridge_host_fdb_add(struct dsa_port *dp, const unsigned char *addr, u16 vid) { - struct net_device *master = dsa_port_to_master(dp); + struct net_device *conduit = dsa_port_to_conduit(dp); struct dsa_db db = { .type = DSA_DB_BRIDGE, .bridge = *dp->bridge, @@ -1057,12 +1057,12 @@ int dsa_port_bridge_host_fdb_add(struct dsa_port *dp, if (!dp->ds->fdb_isolation) db.bridge.num = 0; - /* Avoid a call to __dev_set_promiscuity() on the master, which + /* Avoid a call to __dev_set_promiscuity() on the conduit, which * requires rtnl_lock(), since we can't guarantee that is held here, * and we can't take it either. */ - if (master->priv_flags & IFF_UNICAST_FLT) { - err = dev_uc_add(master, addr); + if (conduit->priv_flags & IFF_UNICAST_FLT) { + err = dev_uc_add(conduit, addr); if (err) return err; } @@ -1098,7 +1098,7 @@ int dsa_port_standalone_host_fdb_del(struct dsa_port *dp, int dsa_port_bridge_host_fdb_del(struct dsa_port *dp, const unsigned char *addr, u16 vid) { - struct net_device *master = dsa_port_to_master(dp); + struct net_device *conduit = dsa_port_to_conduit(dp); struct dsa_db db = { .type = DSA_DB_BRIDGE, .bridge = *dp->bridge, @@ -1108,8 +1108,8 @@ int dsa_port_bridge_host_fdb_del(struct dsa_port *dp, if (!dp->ds->fdb_isolation) db.bridge.num = 0; - if (master->priv_flags & IFF_UNICAST_FLT) { - err = dev_uc_del(master, addr); + if (conduit->priv_flags & IFF_UNICAST_FLT) { + err = dev_uc_del(conduit, addr); if (err) return err; } @@ -1229,7 +1229,7 @@ int dsa_port_standalone_host_mdb_add(const struct dsa_port *dp, int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp, const struct switchdev_obj_port_mdb *mdb) { - struct net_device *master = dsa_port_to_master(dp); + struct net_device *conduit = dsa_port_to_conduit(dp); struct dsa_db db = { .type = DSA_DB_BRIDGE, .bridge = *dp->bridge, @@ -1239,7 +1239,7 @@ int dsa_port_bridge_host_mdb_add(const struct dsa_port *dp, if (!dp->ds->fdb_isolation) db.bridge.num = 0; - err = dev_mc_add(master, mdb->addr); + err = dev_mc_add(conduit, mdb->addr); if (err) return err; @@ -1273,7 +1273,7 @@ int dsa_port_standalone_host_mdb_del(const struct dsa_port *dp, int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp, const struct switchdev_obj_port_mdb *mdb) { - struct net_device *master = dsa_port_to_master(dp); + struct net_device *conduit = dsa_port_to_conduit(dp); struct dsa_db db = { .type = DSA_DB_BRIDGE, .bridge = *dp->bridge, @@ -1283,7 +1283,7 @@ int dsa_port_bridge_host_mdb_del(const struct dsa_port *dp, if (!dp->ds->fdb_isolation) db.bridge.num = 0; - err = dev_mc_del(master, mdb->addr); + err = dev_mc_del(conduit, mdb->addr); if (err) return err; @@ -1318,7 +1318,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp, const struct 
switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { - struct net_device *master = dsa_port_to_master(dp); + struct net_device *conduit = dsa_port_to_conduit(dp); struct dsa_notifier_vlan_info info = { .dp = dp, .vlan = vlan, @@ -1330,7 +1330,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp, if (err && err != -EOPNOTSUPP) return err; - vlan_vid_add(master, htons(ETH_P_8021Q), vlan->vid); + vlan_vid_add(conduit, htons(ETH_P_8021Q), vlan->vid); return err; } @@ -1338,7 +1338,7 @@ int dsa_port_host_vlan_add(struct dsa_port *dp, int dsa_port_host_vlan_del(struct dsa_port *dp, const struct switchdev_obj_port_vlan *vlan) { - struct net_device *master = dsa_port_to_master(dp); + struct net_device *conduit = dsa_port_to_conduit(dp); struct dsa_notifier_vlan_info info = { .dp = dp, .vlan = vlan, @@ -1349,7 +1349,7 @@ int dsa_port_host_vlan_del(struct dsa_port *dp, if (err && err != -EOPNOTSUPP) return err; - vlan_vid_del(master, htons(ETH_P_8021Q), vlan->vid); + vlan_vid_del(conduit, htons(ETH_P_8021Q), vlan->vid); return err; } @@ -1398,24 +1398,24 @@ int dsa_port_mrp_del_ring_role(const struct dsa_port *dp, return ds->ops->port_mrp_del_ring_role(ds, dp->index, mrp); } -static int dsa_port_assign_master(struct dsa_port *dp, - struct net_device *master, - struct netlink_ext_ack *extack, - bool fail_on_err) +static int dsa_port_assign_conduit(struct dsa_port *dp, + struct net_device *conduit, + struct netlink_ext_ack *extack, + bool fail_on_err) { struct dsa_switch *ds = dp->ds; int port = dp->index, err; - err = ds->ops->port_change_master(ds, port, master, extack); + err = ds->ops->port_change_conduit(ds, port, conduit, extack); if (err && !fail_on_err) - dev_err(ds->dev, "port %d failed to assign master %s: %pe\n", - port, master->name, ERR_PTR(err)); + dev_err(ds->dev, "port %d failed to assign conduit %s: %pe\n", + port, conduit->name, ERR_PTR(err)); if (err && fail_on_err) return err; - dp->cpu_dp = master->dsa_ptr; - dp->cpu_port_in_lag = netif_is_lag_master(master); + dp->cpu_dp = conduit->dsa_ptr; + dp->cpu_port_in_lag = netif_is_lag_master(conduit); return 0; } @@ -1428,12 +1428,12 @@ static int dsa_port_assign_master(struct dsa_port *dp, * the old CPU port before changing it, and restore it on errors during the * bringup of the new one. */ -int dsa_port_change_master(struct dsa_port *dp, struct net_device *master, - struct netlink_ext_ack *extack) +int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit, + struct netlink_ext_ack *extack) { struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp); - struct net_device *old_master = dsa_port_to_master(dp); - struct net_device *dev = dp->slave; + struct net_device *old_conduit = dsa_port_to_conduit(dp); + struct net_device *dev = dp->user; struct dsa_switch *ds = dp->ds; bool vlan_filtering; int err, tmp; @@ -1454,7 +1454,7 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master, */ vlan_filtering = dsa_port_is_vlan_filtering(dp); if (vlan_filtering) { - err = dsa_slave_manage_vlan_filtering(dev, false); + err = dsa_user_manage_vlan_filtering(dev, false); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to remove standalone VLANs"); @@ -1465,16 +1465,16 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master, /* Standalone addresses, and addresses of upper interfaces like * VLAN, LAG, HSR need to be migrated. 
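
The hardware side of a conduit change goes through the driver's port_change_conduit hook (renamed from port_change_master), dispatched in dsa_port_assign_conduit() above. A minimal sketch of a hypothetical implementation that merely validates the request; a real driver would reprogram the user port's CPU port destination here:

    static int example_port_change_conduit(struct dsa_switch *ds, int port,
                                           struct net_device *conduit,
                                           struct netlink_ext_ack *extack)
    {
        /* Reject LAG conduits if the hardware cannot steer one user
         * port's traffic towards several CPU ports.
         */
        if (netif_is_lag_master(conduit)) {
            NL_SET_ERR_MSG_MOD(extack, "LAG conduits not supported");
            return -EOPNOTSUPP;
        }

        return 0;
    }
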
*/ - dsa_slave_unsync_ha(dev); + dsa_user_unsync_ha(dev); - err = dsa_port_assign_master(dp, master, extack, true); + err = dsa_port_assign_conduit(dp, conduit, extack, true); if (err) goto rewind_old_addrs; - dsa_slave_sync_ha(dev); + dsa_user_sync_ha(dev); if (vlan_filtering) { - err = dsa_slave_manage_vlan_filtering(dev, true); + err = dsa_user_manage_vlan_filtering(dev, true); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to restore standalone VLANs"); @@ -1495,19 +1495,19 @@ int dsa_port_change_master(struct dsa_port *dp, struct net_device *master, rewind_new_vlan: if (vlan_filtering) - dsa_slave_manage_vlan_filtering(dev, false); + dsa_user_manage_vlan_filtering(dev, false); rewind_new_addrs: - dsa_slave_unsync_ha(dev); + dsa_user_unsync_ha(dev); - dsa_port_assign_master(dp, old_master, NULL, false); + dsa_port_assign_conduit(dp, old_conduit, NULL, false); /* Restore the objects on the old CPU port */ rewind_old_addrs: - dsa_slave_sync_ha(dev); + dsa_user_sync_ha(dev); if (vlan_filtering) { - tmp = dsa_slave_manage_vlan_filtering(dev, true); + tmp = dsa_user_manage_vlan_filtering(dev, true); if (tmp) { dev_err(ds->dev, "port %d failed to restore standalone VLANs: %pe\n", @@ -1620,7 +1620,7 @@ static void dsa_port_phylink_mac_link_down(struct phylink_config *config, struct dsa_switch *ds = dp->ds; if (dsa_port_is_user(dp)) - phydev = dp->slave->phydev; + phydev = dp->user->phydev; if (!ds->ops->phylink_mac_link_down) { if (ds->ops->adjust_link && phydev) @@ -1808,7 +1808,7 @@ err_phy_connect: * their type. * * User ports with no phy-handle or fixed-link are expected to connect to an - * internal PHY located on the ds->slave_mii_bus at an MDIO address equal to + * internal PHY located on the ds->user_mii_bus at an MDIO address equal to * the port number. This description is still actively supported. * * Shared (CPU and DSA) ports with no phy-handle or fixed-link are expected to @@ -1829,7 +1829,7 @@ err_phy_connect: * a fixed-link, a phy-handle, or a managed = "in-band-status" property. * It becomes the responsibility of the driver to ensure that these ports * operate at the maximum speed (whatever this means) and will interoperate - * with the DSA master or other cascade port, since phylink methods will not be + * with the DSA conduit or other cascade port, since phylink methods will not be * invoked for them. 
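As an aside to the ds->user_mii_bus convention described above, a driver that registered such a bus could resolve a user port's internal PHY as below. This is a sketch, not code from this patch; it assumes the bus was already populated via mdiobus_register(), and relies on mdiobus_get_phy() returning NULL when nothing probed at that address.

#include <linux/phy.h>

static struct phy_device *port_internal_phy(struct dsa_switch *ds, int port)
{
	/* By convention, user port N maps to MDIO address N on this bus */
	if (!ds->user_mii_bus)
		return NULL;

	return mdiobus_get_phy(ds->user_mii_bus, port);
}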
* * If you are considering expanding this table for newly introduced switches, diff --git a/net/dsa/port.h b/net/dsa/port.h index 334879964e2c..6bc3291573c0 100644 --- a/net/dsa/port.h +++ b/net/dsa/port.h @@ -109,7 +109,7 @@ void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr); int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast); void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast); void dsa_port_set_host_flood(struct dsa_port *dp, bool uc, bool mc); -int dsa_port_change_master(struct dsa_port *dp, struct net_device *master, - struct netlink_ext_ack *extack); +int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit, + struct netlink_ext_ack *extack); #endif diff --git a/net/dsa/slave.c b/net/dsa/slave.c deleted file mode 100644 index 4c3e502d7e16..000000000000 --- a/net/dsa/slave.c +++ /dev/null @@ -1,3727 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * net/dsa/slave.c - Slave device handling - * Copyright (c) 2008-2009 Marvell Semiconductor - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "dsa.h" -#include "port.h" -#include "master.h" -#include "netlink.h" -#include "slave.h" -#include "switch.h" -#include "tag.h" - -struct dsa_switchdev_event_work { - struct net_device *dev; - struct net_device *orig_dev; - struct work_struct work; - unsigned long event; - /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and - * SWITCHDEV_FDB_DEL_TO_DEVICE - */ - unsigned char addr[ETH_ALEN]; - u16 vid; - bool host_addr; -}; - -enum dsa_standalone_event { - DSA_UC_ADD, - DSA_UC_DEL, - DSA_MC_ADD, - DSA_MC_DEL, -}; - -struct dsa_standalone_event_work { - struct work_struct work; - struct net_device *dev; - enum dsa_standalone_event event; - unsigned char addr[ETH_ALEN]; - u16 vid; -}; - -struct dsa_host_vlan_rx_filtering_ctx { - struct net_device *dev; - const unsigned char *addr; - enum dsa_standalone_event event; -}; - -static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds) -{ - return ds->ops->port_fdb_add && ds->ops->port_fdb_del && - ds->fdb_isolation && !ds->vlan_filtering_is_global && - !ds->needs_standalone_vlan_filtering; -} - -static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds) -{ - return ds->ops->port_mdb_add && ds->ops->port_mdb_del && - ds->fdb_isolation && !ds->vlan_filtering_is_global && - !ds->needs_standalone_vlan_filtering; -} - -static void dsa_slave_standalone_event_work(struct work_struct *work) -{ - struct dsa_standalone_event_work *standalone_work = - container_of(work, struct dsa_standalone_event_work, work); - const unsigned char *addr = standalone_work->addr; - struct net_device *dev = standalone_work->dev; - struct dsa_port *dp = dsa_slave_to_port(dev); - struct switchdev_obj_port_mdb mdb; - struct dsa_switch *ds = dp->ds; - u16 vid = standalone_work->vid; - int err; - - switch (standalone_work->event) { - case DSA_UC_ADD: - err = dsa_port_standalone_host_fdb_add(dp, addr, vid); - if (err) { - dev_err(ds->dev, - "port %d failed to add %pM vid %d to fdb: %d\n", - dp->index, addr, vid, err); - break; - } - break; - - case DSA_UC_DEL: - err = dsa_port_standalone_host_fdb_del(dp, addr, vid); - if (err) { - dev_err(ds->dev, - "port %d failed to delete %pM vid %d from fdb: %d\n", - dp->index, addr, vid, err); - } - - break; - case DSA_MC_ADD: - ether_addr_copy(mdb.addr, addr); - mdb.vid = vid; - - err = 
dsa_port_standalone_host_mdb_add(dp, &mdb); - if (err) { - dev_err(ds->dev, - "port %d failed to add %pM vid %d to mdb: %d\n", - dp->index, addr, vid, err); - break; - } - break; - case DSA_MC_DEL: - ether_addr_copy(mdb.addr, addr); - mdb.vid = vid; - - err = dsa_port_standalone_host_mdb_del(dp, &mdb); - if (err) { - dev_err(ds->dev, - "port %d failed to delete %pM vid %d from mdb: %d\n", - dp->index, addr, vid, err); - } - - break; - } - - kfree(standalone_work); -} - -static int dsa_slave_schedule_standalone_work(struct net_device *dev, - enum dsa_standalone_event event, - const unsigned char *addr, - u16 vid) -{ - struct dsa_standalone_event_work *standalone_work; - - standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC); - if (!standalone_work) - return -ENOMEM; - - INIT_WORK(&standalone_work->work, dsa_slave_standalone_event_work); - standalone_work->event = event; - standalone_work->dev = dev; - - ether_addr_copy(standalone_work->addr, addr); - standalone_work->vid = vid; - - dsa_schedule_work(&standalone_work->work); - - return 0; -} - -static int dsa_slave_host_vlan_rx_filtering(void *arg, int vid) -{ - struct dsa_host_vlan_rx_filtering_ctx *ctx = arg; - - return dsa_slave_schedule_standalone_work(ctx->dev, ctx->event, - ctx->addr, vid); -} - -static int dsa_slave_vlan_for_each(struct net_device *dev, - int (*cb)(void *arg, int vid), void *arg) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_vlan *v; - int err; - - lockdep_assert_held(&dev->addr_list_lock); - - err = cb(arg, 0); - if (err) - return err; - - list_for_each_entry(v, &dp->user_vlans, list) { - err = cb(arg, v->vid); - if (err) - return err; - } - - return 0; -} - -static int dsa_slave_sync_uc(struct net_device *dev, - const unsigned char *addr) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_host_vlan_rx_filtering_ctx ctx = { - .dev = dev, - .addr = addr, - .event = DSA_UC_ADD, - }; - - dev_uc_add(master, addr); - - if (!dsa_switch_supports_uc_filtering(dp->ds)) - return 0; - - return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, - &ctx); -} - -static int dsa_slave_unsync_uc(struct net_device *dev, - const unsigned char *addr) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_host_vlan_rx_filtering_ctx ctx = { - .dev = dev, - .addr = addr, - .event = DSA_UC_DEL, - }; - - dev_uc_del(master, addr); - - if (!dsa_switch_supports_uc_filtering(dp->ds)) - return 0; - - return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, - &ctx); -} - -static int dsa_slave_sync_mc(struct net_device *dev, - const unsigned char *addr) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_host_vlan_rx_filtering_ctx ctx = { - .dev = dev, - .addr = addr, - .event = DSA_MC_ADD, - }; - - dev_mc_add(master, addr); - - if (!dsa_switch_supports_mc_filtering(dp->ds)) - return 0; - - return dsa_slave_vlan_for_each(dev, dsa_slave_host_vlan_rx_filtering, - &ctx); -} - -static int dsa_slave_unsync_mc(struct net_device *dev, - const unsigned char *addr) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_host_vlan_rx_filtering_ctx ctx = { - .dev = dev, - .addr = addr, - .event = DSA_MC_DEL, - }; - - dev_mc_del(master, addr); - - if (!dsa_switch_supports_mc_filtering(dp->ds)) - return 0; - - return dsa_slave_vlan_for_each(dev, 
dsa_slave_host_vlan_rx_filtering, - &ctx); -} - -void dsa_slave_sync_ha(struct net_device *dev) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - struct netdev_hw_addr *ha; - - netif_addr_lock_bh(dev); - - netdev_for_each_synced_mc_addr(ha, dev) - dsa_slave_sync_mc(dev, ha->addr); - - netdev_for_each_synced_uc_addr(ha, dev) - dsa_slave_sync_uc(dev, ha->addr); - - netif_addr_unlock_bh(dev); - - if (dsa_switch_supports_uc_filtering(ds) || - dsa_switch_supports_mc_filtering(ds)) - dsa_flush_workqueue(); -} - -void dsa_slave_unsync_ha(struct net_device *dev) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - struct netdev_hw_addr *ha; - - netif_addr_lock_bh(dev); - - netdev_for_each_synced_uc_addr(ha, dev) - dsa_slave_unsync_uc(dev, ha->addr); - - netdev_for_each_synced_mc_addr(ha, dev) - dsa_slave_unsync_mc(dev, ha->addr); - - netif_addr_unlock_bh(dev); - - if (dsa_switch_supports_uc_filtering(ds) || - dsa_switch_supports_mc_filtering(ds)) - dsa_flush_workqueue(); -} - -/* slave mii_bus handling ***************************************************/ -static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg) -{ - struct dsa_switch *ds = bus->priv; - - if (ds->phys_mii_mask & (1 << addr)) - return ds->ops->phy_read(ds, addr, reg); - - return 0xffff; -} - -static int dsa_slave_phy_write(struct mii_bus *bus, int addr, int reg, u16 val) -{ - struct dsa_switch *ds = bus->priv; - - if (ds->phys_mii_mask & (1 << addr)) - return ds->ops->phy_write(ds, addr, reg, val); - - return 0; -} - -void dsa_slave_mii_bus_init(struct dsa_switch *ds) -{ - ds->slave_mii_bus->priv = (void *)ds; - ds->slave_mii_bus->name = "dsa slave smi"; - ds->slave_mii_bus->read = dsa_slave_phy_read; - ds->slave_mii_bus->write = dsa_slave_phy_write; - snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d", - ds->dst->index, ds->index); - ds->slave_mii_bus->parent = ds->dev; - ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask; -} - - -/* slave device handling ****************************************************/ -static int dsa_slave_get_iflink(const struct net_device *dev) -{ - return dsa_slave_to_master(dev)->ifindex; -} - -static int dsa_slave_open(struct net_device *dev) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int err; - - err = dev_open(master, NULL); - if (err < 0) { - netdev_err(dev, "failed to open master %s\n", master->name); - goto out; - } - - if (dsa_switch_supports_uc_filtering(ds)) { - err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0); - if (err) - goto out; - } - - if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) { - err = dev_uc_add(master, dev->dev_addr); - if (err < 0) - goto del_host_addr; - } - - err = dsa_port_enable_rt(dp, dev->phydev); - if (err) - goto del_unicast; - - return 0; - -del_unicast: - if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) - dev_uc_del(master, dev->dev_addr); -del_host_addr: - if (dsa_switch_supports_uc_filtering(ds)) - dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); -out: - return err; -} - -static int dsa_slave_close(struct net_device *dev) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - dsa_port_disable_rt(dp); - - if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) - dev_uc_del(master, dev->dev_addr); - - if (dsa_switch_supports_uc_filtering(ds)) - 
dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); - - return 0; -} - -static void dsa_slave_manage_host_flood(struct net_device *dev) -{ - bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI); - struct dsa_port *dp = dsa_slave_to_port(dev); - bool uc = dev->flags & IFF_PROMISC; - - dsa_port_set_host_flood(dp, uc, mc); -} - -static void dsa_slave_change_rx_flags(struct net_device *dev, int change) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (change & IFF_ALLMULTI) - dev_set_allmulti(master, - dev->flags & IFF_ALLMULTI ? 1 : -1); - if (change & IFF_PROMISC) - dev_set_promiscuity(master, - dev->flags & IFF_PROMISC ? 1 : -1); - - if (dsa_switch_supports_uc_filtering(ds) && - dsa_switch_supports_mc_filtering(ds)) - dsa_slave_manage_host_flood(dev); -} - -static void dsa_slave_set_rx_mode(struct net_device *dev) -{ - __dev_mc_sync(dev, dsa_slave_sync_mc, dsa_slave_unsync_mc); - __dev_uc_sync(dev, dsa_slave_sync_uc, dsa_slave_unsync_uc); -} - -static int dsa_slave_set_mac_address(struct net_device *dev, void *a) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - struct sockaddr *addr = a; - int err; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - if (ds->ops->port_set_mac_address) { - err = ds->ops->port_set_mac_address(ds, dp->index, - addr->sa_data); - if (err) - return err; - } - - /* If the port is down, the address isn't synced yet to hardware or - * to the DSA master, so there is nothing to change. - */ - if (!(dev->flags & IFF_UP)) - goto out_change_dev_addr; - - if (dsa_switch_supports_uc_filtering(ds)) { - err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0); - if (err) - return err; - } - - if (!ether_addr_equal(addr->sa_data, master->dev_addr)) { - err = dev_uc_add(master, addr->sa_data); - if (err < 0) - goto del_unicast; - } - - if (!ether_addr_equal(dev->dev_addr, master->dev_addr)) - dev_uc_del(master, dev->dev_addr); - - if (dsa_switch_supports_uc_filtering(ds)) - dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); - -out_change_dev_addr: - eth_hw_addr_set(dev, addr->sa_data); - - return 0; - -del_unicast: - if (dsa_switch_supports_uc_filtering(ds)) - dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0); - - return err; -} - -struct dsa_slave_dump_ctx { - struct net_device *dev; - struct sk_buff *skb; - struct netlink_callback *cb; - int idx; -}; - -static int -dsa_slave_port_fdb_do_dump(const unsigned char *addr, u16 vid, - bool is_static, void *data) -{ - struct dsa_slave_dump_ctx *dump = data; - u32 portid = NETLINK_CB(dump->cb->skb).portid; - u32 seq = dump->cb->nlh->nlmsg_seq; - struct nlmsghdr *nlh; - struct ndmsg *ndm; - - if (dump->idx < dump->cb->args[2]) - goto skip; - - nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, - sizeof(*ndm), NLM_F_MULTI); - if (!nlh) - return -EMSGSIZE; - - ndm = nlmsg_data(nlh); - ndm->ndm_family = AF_BRIDGE; - ndm->ndm_pad1 = 0; - ndm->ndm_pad2 = 0; - ndm->ndm_flags = NTF_SELF; - ndm->ndm_type = 0; - ndm->ndm_ifindex = dump->dev->ifindex; - ndm->ndm_state = is_static ? 
NUD_NOARP : NUD_REACHABLE; - - if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr)) - goto nla_put_failure; - - if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid)) - goto nla_put_failure; - - nlmsg_end(dump->skb, nlh); - -skip: - dump->idx++; - return 0; - -nla_put_failure: - nlmsg_cancel(dump->skb, nlh); - return -EMSGSIZE; -} - -static int -dsa_slave_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, - struct net_device *dev, struct net_device *filter_dev, - int *idx) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_slave_dump_ctx dump = { - .dev = dev, - .skb = skb, - .cb = cb, - .idx = *idx, - }; - int err; - - err = dsa_port_fdb_dump(dp, dsa_slave_port_fdb_do_dump, &dump); - *idx = dump.idx; - - return err; -} - -static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) -{ - struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->dp->ds; - int port = p->dp->index; - - /* Pass through to switch driver if it supports timestamping */ - switch (cmd) { - case SIOCGHWTSTAMP: - if (ds->ops->port_hwtstamp_get) - return ds->ops->port_hwtstamp_get(ds, port, ifr); - break; - case SIOCSHWTSTAMP: - if (ds->ops->port_hwtstamp_set) - return ds->ops->port_hwtstamp_set(ds, port, ifr); - break; - } - - return phylink_mii_ioctl(p->dp->pl, ifr, cmd); -} - -static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx, - const struct switchdev_attr *attr, - struct netlink_ext_ack *extack) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - int ret; - - if (ctx && ctx != dp) - return 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev)) - return -EOPNOTSUPP; - - ret = dsa_port_set_state(dp, attr->u.stp_state, true); - break; - case SWITCHDEV_ATTR_ID_PORT_MST_STATE: - if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev)) - return -EOPNOTSUPP; - - ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) - return -EOPNOTSUPP; - - ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering, - extack); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) - return -EOPNOTSUPP; - - ret = dsa_port_ageing_time(dp, attr->u.ageing_time); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_MST: - if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) - return -EOPNOTSUPP; - - ret = dsa_port_mst_enable(dp, attr->u.mst, extack); - break; - case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: - if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev)) - return -EOPNOTSUPP; - - ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags, - extack); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev)) - return -EOPNOTSUPP; - - ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack); - break; - case SWITCHDEV_ATTR_ID_VLAN_MSTI: - if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) - return -EOPNOTSUPP; - - ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti); - break; - default: - ret = -EOPNOTSUPP; - break; - } - - return ret; -} - -/* Must be called under rcu_read_lock() */ -static int -dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave, - const struct switchdev_obj_port_vlan *vlan) -{ - struct net_device *upper_dev; - struct list_head *iter; - - netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) { - u16 vid; - - if (!is_vlan_dev(upper_dev)) - continue; 
- - vid = vlan_dev_vlan_id(upper_dev); - if (vid == vlan->vid) - return -EBUSY; - } - - return 0; -} - -static int dsa_slave_vlan_add(struct net_device *dev, - const struct switchdev_obj *obj, - struct netlink_ext_ack *extack) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct switchdev_obj_port_vlan *vlan; - int err; - - if (dsa_port_skip_vlan_configuration(dp)) { - NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN"); - return 0; - } - - vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - - /* Deny adding a bridge VLAN when there is already an 802.1Q upper with - * the same VID. - */ - if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) { - rcu_read_lock(); - err = dsa_slave_vlan_check_for_8021q_uppers(dev, vlan); - rcu_read_unlock(); - if (err) { - NL_SET_ERR_MSG_MOD(extack, - "Port already has a VLAN upper with this VID"); - return err; - } - } - - return dsa_port_vlan_add(dp, vlan, extack); -} - -/* Offload a VLAN installed on the bridge or on a foreign interface by - * installing it as a VLAN towards the CPU port. - */ -static int dsa_slave_host_vlan_add(struct net_device *dev, - const struct switchdev_obj *obj, - struct netlink_ext_ack *extack) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct switchdev_obj_port_vlan vlan; - - /* Do nothing if this is a software bridge */ - if (!dp->bridge) - return -EOPNOTSUPP; - - if (dsa_port_skip_vlan_configuration(dp)) { - NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN"); - return 0; - } - - vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj); - - /* Even though drivers often handle CPU membership in special ways, - * it doesn't make sense to program a PVID, so clear this flag. - */ - vlan.flags &= ~BRIDGE_VLAN_INFO_PVID; - - return dsa_port_host_vlan_add(dp, &vlan, extack); -} - -static int dsa_slave_port_obj_add(struct net_device *dev, const void *ctx, - const struct switchdev_obj *obj, - struct netlink_ext_ack *extack) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - int err; - - if (ctx && ctx != dp) - return 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_MDB: - if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev)) - return -EOPNOTSUPP; - - err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - case SWITCHDEV_OBJ_ID_HOST_MDB: - if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) - return -EOPNOTSUPP; - - err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - case SWITCHDEV_OBJ_ID_PORT_VLAN: - if (dsa_port_offloads_bridge_port(dp, obj->orig_dev)) - err = dsa_slave_vlan_add(dev, obj, extack); - else - err = dsa_slave_host_vlan_add(dev, obj, extack); - break; - case SWITCHDEV_OBJ_ID_MRP: - if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) - return -EOPNOTSUPP; - - err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj)); - break; - case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: - if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) - return -EOPNOTSUPP; - - err = dsa_port_mrp_add_ring_role(dp, - SWITCHDEV_OBJ_RING_ROLE_MRP(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int dsa_slave_vlan_del(struct net_device *dev, - const struct switchdev_obj *obj) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct switchdev_obj_port_vlan *vlan; - - if (dsa_port_skip_vlan_configuration(dp)) - return 0; - - vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - - return dsa_port_vlan_del(dp, vlan); -} - -static int dsa_slave_host_vlan_del(struct net_device *dev, - const struct switchdev_obj *obj) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct 
switchdev_obj_port_vlan *vlan; - - /* Do nothing if this is a software bridge */ - if (!dp->bridge) - return -EOPNOTSUPP; - - if (dsa_port_skip_vlan_configuration(dp)) - return 0; - - vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - - return dsa_port_host_vlan_del(dp, vlan); -} - -static int dsa_slave_port_obj_del(struct net_device *dev, const void *ctx, - const struct switchdev_obj *obj) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - int err; - - if (ctx && ctx != dp) - return 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_MDB: - if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev)) - return -EOPNOTSUPP; - - err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - case SWITCHDEV_OBJ_ID_HOST_MDB: - if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) - return -EOPNOTSUPP; - - err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); - break; - case SWITCHDEV_OBJ_ID_PORT_VLAN: - if (dsa_port_offloads_bridge_port(dp, obj->orig_dev)) - err = dsa_slave_vlan_del(dev, obj); - else - err = dsa_slave_host_vlan_del(dev, obj); - break; - case SWITCHDEV_OBJ_ID_MRP: - if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) - return -EOPNOTSUPP; - - err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj)); - break; - case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: - if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) - return -EOPNOTSUPP; - - err = dsa_port_mrp_del_ring_role(dp, - SWITCHDEV_OBJ_RING_ROLE_MRP(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static inline netdev_tx_t dsa_slave_netpoll_send_skb(struct net_device *dev, - struct sk_buff *skb) -{ -#ifdef CONFIG_NET_POLL_CONTROLLER - struct dsa_slave_priv *p = netdev_priv(dev); - - return netpoll_send_skb(p->netpoll, skb); -#else - BUG(); - return NETDEV_TX_OK; -#endif -} - -static void dsa_skb_tx_timestamp(struct dsa_slave_priv *p, - struct sk_buff *skb) -{ - struct dsa_switch *ds = p->dp->ds; - - if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) - return; - - if (!ds->ops->port_txtstamp) - return; - - ds->ops->port_txtstamp(ds, p->dp->index, skb); -} - -netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev) -{ - /* SKB for netpoll still need to be mangled with the protocol-specific - * tag to be successfully transmitted - */ - if (unlikely(netpoll_tx_running(dev))) - return dsa_slave_netpoll_send_skb(dev, skb); - - /* Queue the SKB for transmission on the parent interface, but - * do not modify its EtherType - */ - skb->dev = dsa_slave_to_master(dev); - dev_queue_xmit(skb); - - return NETDEV_TX_OK; -} -EXPORT_SYMBOL_GPL(dsa_enqueue_skb); - -static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev) -{ - int needed_headroom = dev->needed_headroom; - int needed_tailroom = dev->needed_tailroom; - - /* For tail taggers, we need to pad short frames ourselves, to ensure - * that the tail tag does not fail at its role of being at the end of - * the packet, once the master interface pads the frame. Account for - * that pad length here, and pad later. - */ - if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) - needed_tailroom += ETH_ZLEN - skb->len; - /* skb_headroom() returns unsigned int... */ - needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); - needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); - - if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) - /* No reallocation needed, yay! 
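The tail-tagger padding rule in dsa_realloc_skb() deserves a worked example. Assuming a hypothetical 4-byte tail tag: a 40-byte runt must first be padded up to ETH_ZLEN (60 bytes) so the tag still sits at the true end of the frame once the conduit pads it, giving 4 + (60 - 40) = 24 bytes of required tailroom.

#include <linux/if_ether.h>

/* Sketch of the arithmetic above; tag_len is tagger-specific */
static unsigned int tail_tag_room(unsigned int frame_len, unsigned int tag_len)
{
	unsigned int needed = tag_len;

	if (frame_len < ETH_ZLEN)
		needed += ETH_ZLEN - frame_len;

	return needed;
}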
*/ - return 0; - - return pskb_expand_head(skb, needed_headroom, needed_tailroom, - GFP_ATOMIC); -} - -static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct dsa_slave_priv *p = netdev_priv(dev); - struct sk_buff *nskb; - - dev_sw_netstats_tx_add(dev, 1, skb->len); - - memset(skb->cb, 0, sizeof(skb->cb)); - - /* Handle tx timestamp if any */ - dsa_skb_tx_timestamp(p, skb); - - if (dsa_realloc_skb(skb, dev)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - /* needed_tailroom should still be 'warm' in the cache line from - * dsa_realloc_skb(), which has also ensured that padding is safe. - */ - if (dev->needed_tailroom) - eth_skb_pad(skb); - - /* Transmit function may have to reallocate the original SKB, - * in which case it must have freed it. Only free it here on error. - */ - nskb = p->xmit(skb, dev); - if (!nskb) { - kfree_skb(skb); - return NETDEV_TX_OK; - } - - return dsa_enqueue_skb(nskb, dev); -} - -/* ethtool operations *******************************************************/ - -static void dsa_slave_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver)); - strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); -} - -static int dsa_slave_get_regs_len(struct net_device *dev) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_regs_len) - return ds->ops->get_regs_len(ds, dp->index); - - return -EOPNOTSUPP; -} - -static void -dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_regs) - ds->ops->get_regs(ds, dp->index, regs, _p); -} - -static int dsa_slave_nway_reset(struct net_device *dev) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - - return phylink_ethtool_nway_reset(dp->pl); -} - -static int dsa_slave_get_eeprom_len(struct net_device *dev) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->cd && ds->cd->eeprom_len) - return ds->cd->eeprom_len; - - if (ds->ops->get_eeprom_len) - return ds->ops->get_eeprom_len(ds); - - return 0; -} - -static int dsa_slave_get_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_eeprom) - return ds->ops->get_eeprom(ds, eeprom, data); - - return -EOPNOTSUPP; -} - -static int dsa_slave_set_eeprom(struct net_device *dev, - struct ethtool_eeprom *eeprom, u8 *data) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->set_eeprom) - return ds->ops->set_eeprom(ds, eeprom, data); - - return -EOPNOTSUPP; -} - -static void dsa_slave_get_strings(struct net_device *dev, - uint32_t stringset, uint8_t *data) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (stringset == ETH_SS_STATS) { - int len = ETH_GSTRING_LEN; - - strscpy_pad(data, "tx_packets", len); - strscpy_pad(data + len, "tx_bytes", len); - strscpy_pad(data + 2 * len, "rx_packets", len); - strscpy_pad(data + 3 * len, "rx_bytes", len); - if (ds->ops->get_strings) - ds->ops->get_strings(ds, dp->index, stringset, - data + 4 * len); - } else if (stringset == ETH_SS_TEST) { - net_selftest_get_strings(data); - } - -} - -static void 
dsa_slave_get_ethtool_stats(struct net_device *dev, - struct ethtool_stats *stats, - uint64_t *data) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - struct pcpu_sw_netstats *s; - unsigned int start; - int i; - - for_each_possible_cpu(i) { - u64 tx_packets, tx_bytes, rx_packets, rx_bytes; - - s = per_cpu_ptr(dev->tstats, i); - do { - start = u64_stats_fetch_begin(&s->syncp); - tx_packets = u64_stats_read(&s->tx_packets); - tx_bytes = u64_stats_read(&s->tx_bytes); - rx_packets = u64_stats_read(&s->rx_packets); - rx_bytes = u64_stats_read(&s->rx_bytes); - } while (u64_stats_fetch_retry(&s->syncp, start)); - data[0] += tx_packets; - data[1] += tx_bytes; - data[2] += rx_packets; - data[3] += rx_bytes; - } - if (ds->ops->get_ethtool_stats) - ds->ops->get_ethtool_stats(ds, dp->index, data + 4); -} - -static int dsa_slave_get_sset_count(struct net_device *dev, int sset) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (sset == ETH_SS_STATS) { - int count = 0; - - if (ds->ops->get_sset_count) { - count = ds->ops->get_sset_count(ds, dp->index, sset); - if (count < 0) - return count; - } - - return count + 4; - } else if (sset == ETH_SS_TEST) { - return net_selftest_get_count(); - } - - return -EOPNOTSUPP; -} - -static void dsa_slave_get_eth_phy_stats(struct net_device *dev, - struct ethtool_eth_phy_stats *phy_stats) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_eth_phy_stats) - ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats); -} - -static void dsa_slave_get_eth_mac_stats(struct net_device *dev, - struct ethtool_eth_mac_stats *mac_stats) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_eth_mac_stats) - ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats); -} - -static void -dsa_slave_get_eth_ctrl_stats(struct net_device *dev, - struct ethtool_eth_ctrl_stats *ctrl_stats) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_eth_ctrl_stats) - ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats); -} - -static void -dsa_slave_get_rmon_stats(struct net_device *dev, - struct ethtool_rmon_stats *rmon_stats, - const struct ethtool_rmon_hist_range **ranges) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_rmon_stats) - ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges); -} - -static void dsa_slave_net_selftest(struct net_device *ndev, - struct ethtool_test *etest, u64 *buf) -{ - struct dsa_port *dp = dsa_slave_to_port(ndev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->self_test) { - ds->ops->self_test(ds, dp->index, etest, buf); - return; - } - - net_selftest(ndev, etest, buf); -} - -static int dsa_slave_get_mm(struct net_device *dev, - struct ethtool_mm_state *state) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (!ds->ops->get_mm) - return -EOPNOTSUPP; - - return ds->ops->get_mm(ds, dp->index, state); -} - -static int dsa_slave_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg, - struct netlink_ext_ack *extack) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (!ds->ops->set_mm) - return -EOPNOTSUPP; - - return ds->ops->set_mm(ds, dp->index, cfg, extack); -} - -static void dsa_slave_get_mm_stats(struct net_device *dev, - struct ethtool_mm_stats *stats) -{ - struct dsa_port *dp = 
dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_mm_stats) - ds->ops->get_mm_stats(ds, dp->index, stats); -} - -static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - phylink_ethtool_get_wol(dp->pl, w); - - if (ds->ops->get_wol) - ds->ops->get_wol(ds, dp->index, w); -} - -static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int ret = -EOPNOTSUPP; - - phylink_ethtool_set_wol(dp->pl, w); - - if (ds->ops->set_wol) - ret = ds->ops->set_wol(ds, dp->index, w); - - return ret; -} - -static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int ret; - - /* Port's PHY and MAC both need to be EEE capable */ - if (!dev->phydev || !dp->pl) - return -ENODEV; - - if (!ds->ops->set_mac_eee) - return -EOPNOTSUPP; - - ret = ds->ops->set_mac_eee(ds, dp->index, e); - if (ret) - return ret; - - return phylink_ethtool_set_eee(dp->pl, e); -} - -static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int ret; - - /* Port's PHY and MAC both need to be EEE capable */ - if (!dev->phydev || !dp->pl) - return -ENODEV; - - if (!ds->ops->get_mac_eee) - return -EOPNOTSUPP; - - ret = ds->ops->get_mac_eee(ds, dp->index, e); - if (ret) - return ret; - - return phylink_ethtool_get_eee(dp->pl, e); -} - -static int dsa_slave_get_link_ksettings(struct net_device *dev, - struct ethtool_link_ksettings *cmd) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - - return phylink_ethtool_ksettings_get(dp->pl, cmd); -} - -static int dsa_slave_set_link_ksettings(struct net_device *dev, - const struct ethtool_link_ksettings *cmd) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - - return phylink_ethtool_ksettings_set(dp->pl, cmd); -} - -static void dsa_slave_get_pause_stats(struct net_device *dev, - struct ethtool_pause_stats *pause_stats) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_pause_stats) - ds->ops->get_pause_stats(ds, dp->index, pause_stats); -} - -static void dsa_slave_get_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - - phylink_ethtool_get_pauseparam(dp->pl, pause); -} - -static int dsa_slave_set_pauseparam(struct net_device *dev, - struct ethtool_pauseparam *pause) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - - return phylink_ethtool_set_pauseparam(dp->pl, pause); -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static int dsa_slave_netpoll_setup(struct net_device *dev, - struct netpoll_info *ni) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_slave_priv *p = netdev_priv(dev); - struct netpoll *netpoll; - int err = 0; - - netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); - if (!netpoll) - return -ENOMEM; - - err = __netpoll_setup(netpoll, master); - if (err) { - kfree(netpoll); - goto out; - } - - p->netpoll = netpoll; -out: - return err; -} - -static void dsa_slave_netpoll_cleanup(struct net_device *dev) -{ - struct dsa_slave_priv *p = netdev_priv(dev); - struct netpoll *netpoll = p->netpoll; - - if (!netpoll) - return; - - p->netpoll = NULL; - - __netpoll_free(netpoll); -} - -static void 
dsa_slave_poll_controller(struct net_device *dev) -{ -} -#endif - -static struct dsa_mall_tc_entry * -dsa_slave_mall_tc_entry_find(struct net_device *dev, unsigned long cookie) -{ - struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_mall_tc_entry *mall_tc_entry; - - list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) - if (mall_tc_entry->cookie == cookie) - return mall_tc_entry; - - return NULL; -} - -static int -dsa_slave_add_cls_matchall_mirred(struct net_device *dev, - struct tc_cls_matchall_offload *cls, - bool ingress) -{ - struct netlink_ext_ack *extack = cls->common.extack; - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_mall_mirror_tc_entry *mirror; - struct dsa_mall_tc_entry *mall_tc_entry; - struct dsa_switch *ds = dp->ds; - struct flow_action_entry *act; - struct dsa_port *to_dp; - int err; - - if (!ds->ops->port_mirror_add) - return -EOPNOTSUPP; - - if (!flow_action_basic_hw_stats_check(&cls->rule->action, - cls->common.extack)) - return -EOPNOTSUPP; - - act = &cls->rule->action.entries[0]; - - if (!act->dev) - return -EINVAL; - - if (!dsa_slave_dev_check(act->dev)) - return -EOPNOTSUPP; - - mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); - if (!mall_tc_entry) - return -ENOMEM; - - mall_tc_entry->cookie = cls->cookie; - mall_tc_entry->type = DSA_PORT_MALL_MIRROR; - mirror = &mall_tc_entry->mirror; - - to_dp = dsa_slave_to_port(act->dev); - - mirror->to_local_port = to_dp->index; - mirror->ingress = ingress; - - err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack); - if (err) { - kfree(mall_tc_entry); - return err; - } - - list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); - - return err; -} - -static int -dsa_slave_add_cls_matchall_police(struct net_device *dev, - struct tc_cls_matchall_offload *cls, - bool ingress) -{ - struct netlink_ext_ack *extack = cls->common.extack; - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_mall_policer_tc_entry *policer; - struct dsa_mall_tc_entry *mall_tc_entry; - struct dsa_switch *ds = dp->ds; - struct flow_action_entry *act; - int err; - - if (!ds->ops->port_policer_add) { - NL_SET_ERR_MSG_MOD(extack, - "Policing offload not implemented"); - return -EOPNOTSUPP; - } - - if (!ingress) { - NL_SET_ERR_MSG_MOD(extack, - "Only supported on ingress qdisc"); - return -EOPNOTSUPP; - } - - if (!flow_action_basic_hw_stats_check(&cls->rule->action, - cls->common.extack)) - return -EOPNOTSUPP; - - list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) { - if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) { - NL_SET_ERR_MSG_MOD(extack, - "Only one port policer allowed"); - return -EEXIST; - } - } - - act = &cls->rule->action.entries[0]; - - mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); - if (!mall_tc_entry) - return -ENOMEM; - - mall_tc_entry->cookie = cls->cookie; - mall_tc_entry->type = DSA_PORT_MALL_POLICER; - policer = &mall_tc_entry->policer; - policer->rate_bytes_per_sec = act->police.rate_bytes_ps; - policer->burst = act->police.burst; - - err = ds->ops->port_policer_add(ds, dp->index, policer); - if (err) { - kfree(mall_tc_entry); - return err; - } - - list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); - - return err; -} - -static int dsa_slave_add_cls_matchall(struct net_device *dev, - struct tc_cls_matchall_offload *cls, - bool ingress) -{ - int err = -EOPNOTSUPP; - - if (cls->common.protocol == htons(ETH_P_ALL) && - 
flow_offload_has_one_action(&cls->rule->action) && - cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED) - err = dsa_slave_add_cls_matchall_mirred(dev, cls, ingress); - else if (flow_offload_has_one_action(&cls->rule->action) && - cls->rule->action.entries[0].id == FLOW_ACTION_POLICE) - err = dsa_slave_add_cls_matchall_police(dev, cls, ingress); - - return err; -} - -static void dsa_slave_del_cls_matchall(struct net_device *dev, - struct tc_cls_matchall_offload *cls) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_mall_tc_entry *mall_tc_entry; - struct dsa_switch *ds = dp->ds; - - mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie); - if (!mall_tc_entry) - return; - - list_del(&mall_tc_entry->list); - - switch (mall_tc_entry->type) { - case DSA_PORT_MALL_MIRROR: - if (ds->ops->port_mirror_del) - ds->ops->port_mirror_del(ds, dp->index, - &mall_tc_entry->mirror); - break; - case DSA_PORT_MALL_POLICER: - if (ds->ops->port_policer_del) - ds->ops->port_policer_del(ds, dp->index); - break; - default: - WARN_ON(1); - } - - kfree(mall_tc_entry); -} - -static int dsa_slave_setup_tc_cls_matchall(struct net_device *dev, - struct tc_cls_matchall_offload *cls, - bool ingress) -{ - if (cls->common.chain_index) - return -EOPNOTSUPP; - - switch (cls->command) { - case TC_CLSMATCHALL_REPLACE: - return dsa_slave_add_cls_matchall(dev, cls, ingress); - case TC_CLSMATCHALL_DESTROY: - dsa_slave_del_cls_matchall(dev, cls); - return 0; - default: - return -EOPNOTSUPP; - } -} - -static int dsa_slave_add_cls_flower(struct net_device *dev, - struct flow_cls_offload *cls, - bool ingress) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int port = dp->index; - - if (!ds->ops->cls_flower_add) - return -EOPNOTSUPP; - - return ds->ops->cls_flower_add(ds, port, cls, ingress); -} - -static int dsa_slave_del_cls_flower(struct net_device *dev, - struct flow_cls_offload *cls, - bool ingress) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int port = dp->index; - - if (!ds->ops->cls_flower_del) - return -EOPNOTSUPP; - - return ds->ops->cls_flower_del(ds, port, cls, ingress); -} - -static int dsa_slave_stats_cls_flower(struct net_device *dev, - struct flow_cls_offload *cls, - bool ingress) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int port = dp->index; - - if (!ds->ops->cls_flower_stats) - return -EOPNOTSUPP; - - return ds->ops->cls_flower_stats(ds, port, cls, ingress); -} - -static int dsa_slave_setup_tc_cls_flower(struct net_device *dev, - struct flow_cls_offload *cls, - bool ingress) -{ - switch (cls->command) { - case FLOW_CLS_REPLACE: - return dsa_slave_add_cls_flower(dev, cls, ingress); - case FLOW_CLS_DESTROY: - return dsa_slave_del_cls_flower(dev, cls, ingress); - case FLOW_CLS_STATS: - return dsa_slave_stats_cls_flower(dev, cls, ingress); - default: - return -EOPNOTSUPP; - } -} - -static int dsa_slave_setup_tc_block_cb(enum tc_setup_type type, void *type_data, - void *cb_priv, bool ingress) -{ - struct net_device *dev = cb_priv; - - if (!tc_can_offload(dev)) - return -EOPNOTSUPP; - - switch (type) { - case TC_SETUP_CLSMATCHALL: - return dsa_slave_setup_tc_cls_matchall(dev, type_data, ingress); - case TC_SETUP_CLSFLOWER: - return dsa_slave_setup_tc_cls_flower(dev, type_data, ingress); - default: - return -EOPNOTSUPP; - } -} - -static int dsa_slave_setup_tc_block_cb_ig(enum tc_setup_type type, - void *type_data, void *cb_priv) -{ - return dsa_slave_setup_tc_block_cb(type, 
type_data, cb_priv, true); -} - -static int dsa_slave_setup_tc_block_cb_eg(enum tc_setup_type type, - void *type_data, void *cb_priv) -{ - return dsa_slave_setup_tc_block_cb(type, type_data, cb_priv, false); -} - -static LIST_HEAD(dsa_slave_block_cb_list); - -static int dsa_slave_setup_tc_block(struct net_device *dev, - struct flow_block_offload *f) -{ - struct flow_block_cb *block_cb; - flow_setup_cb_t *cb; - - if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) - cb = dsa_slave_setup_tc_block_cb_ig; - else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) - cb = dsa_slave_setup_tc_block_cb_eg; - else - return -EOPNOTSUPP; - - f->driver_block_list = &dsa_slave_block_cb_list; - - switch (f->command) { - case FLOW_BLOCK_BIND: - if (flow_block_cb_is_busy(cb, dev, &dsa_slave_block_cb_list)) - return -EBUSY; - - block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); - if (IS_ERR(block_cb)) - return PTR_ERR(block_cb); - - flow_block_cb_add(block_cb, f); - list_add_tail(&block_cb->driver_list, &dsa_slave_block_cb_list); - return 0; - case FLOW_BLOCK_UNBIND: - block_cb = flow_block_cb_lookup(f->block, cb, dev); - if (!block_cb) - return -ENOENT; - - flow_block_cb_remove(block_cb, f); - list_del(&block_cb->driver_list); - return 0; - default: - return -EOPNOTSUPP; - } -} - -static int dsa_slave_setup_ft_block(struct dsa_switch *ds, int port, - void *type_data) -{ - struct net_device *master = dsa_port_to_master(dsa_to_port(ds, port)); - - if (!master->netdev_ops->ndo_setup_tc) - return -EOPNOTSUPP; - - return master->netdev_ops->ndo_setup_tc(master, TC_SETUP_FT, type_data); -} - -static int dsa_slave_setup_tc(struct net_device *dev, enum tc_setup_type type, - void *type_data) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - switch (type) { - case TC_SETUP_BLOCK: - return dsa_slave_setup_tc_block(dev, type_data); - case TC_SETUP_FT: - return dsa_slave_setup_ft_block(ds, dp->index, type_data); - default: - break; - } - - if (!ds->ops->port_setup_tc) - return -EOPNOTSUPP; - - return ds->ops->port_setup_tc(ds, dp->index, type, type_data); -} - -static int dsa_slave_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *nfc, u32 *rule_locs) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (!ds->ops->get_rxnfc) - return -EOPNOTSUPP; - - return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs); -} - -static int dsa_slave_set_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *nfc) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (!ds->ops->set_rxnfc) - return -EOPNOTSUPP; - - return ds->ops->set_rxnfc(ds, dp->index, nfc); -} - -static int dsa_slave_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *ts) -{ - struct dsa_slave_priv *p = netdev_priv(dev); - struct dsa_switch *ds = p->dp->ds; - - if (!ds->ops->get_ts_info) - return -EOPNOTSUPP; - - return ds->ops->get_ts_info(ds, p->dp->index, ts); -} - -static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto, - u16 vid) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct switchdev_obj_port_vlan vlan = { - .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, - .vid = vid, - /* This API only allows programming tagged, non-PVID VIDs */ - .flags = 0, - }; - struct netlink_ext_ack extack = {0}; - struct dsa_switch *ds = dp->ds; - struct netdev_hw_addr *ha; - struct dsa_vlan *v; - int ret; - - /* User port... 
*/ - ret = dsa_port_vlan_add(dp, &vlan, &extack); - if (ret) { - if (extack._msg) - netdev_err(dev, "%s\n", extack._msg); - return ret; - } - - /* And CPU port... */ - ret = dsa_port_host_vlan_add(dp, &vlan, &extack); - if (ret) { - if (extack._msg) - netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index, - extack._msg); - return ret; - } - - if (!dsa_switch_supports_uc_filtering(ds) && - !dsa_switch_supports_mc_filtering(ds)) - return 0; - - v = kzalloc(sizeof(*v), GFP_KERNEL); - if (!v) { - ret = -ENOMEM; - goto rollback; - } - - netif_addr_lock_bh(dev); - - v->vid = vid; - list_add_tail(&v->list, &dp->user_vlans); - - if (dsa_switch_supports_mc_filtering(ds)) { - netdev_for_each_synced_mc_addr(ha, dev) { - dsa_slave_schedule_standalone_work(dev, DSA_MC_ADD, - ha->addr, vid); - } - } - - if (dsa_switch_supports_uc_filtering(ds)) { - netdev_for_each_synced_uc_addr(ha, dev) { - dsa_slave_schedule_standalone_work(dev, DSA_UC_ADD, - ha->addr, vid); - } - } - - netif_addr_unlock_bh(dev); - - dsa_flush_workqueue(); - - return 0; - -rollback: - dsa_port_host_vlan_del(dp, &vlan); - dsa_port_vlan_del(dp, &vlan); - - return ret; -} - -static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, - u16 vid) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct switchdev_obj_port_vlan vlan = { - .vid = vid, - /* This API only allows programming tagged, non-PVID VIDs */ - .flags = 0, - }; - struct dsa_switch *ds = dp->ds; - struct netdev_hw_addr *ha; - struct dsa_vlan *v; - int err; - - err = dsa_port_vlan_del(dp, &vlan); - if (err) - return err; - - err = dsa_port_host_vlan_del(dp, &vlan); - if (err) - return err; - - if (!dsa_switch_supports_uc_filtering(ds) && - !dsa_switch_supports_mc_filtering(ds)) - return 0; - - netif_addr_lock_bh(dev); - - v = dsa_vlan_find(&dp->user_vlans, &vlan); - if (!v) { - netif_addr_unlock_bh(dev); - return -ENOENT; - } - - list_del(&v->list); - kfree(v); - - if (dsa_switch_supports_mc_filtering(ds)) { - netdev_for_each_synced_mc_addr(ha, dev) { - dsa_slave_schedule_standalone_work(dev, DSA_MC_DEL, - ha->addr, vid); - } - } - - if (dsa_switch_supports_uc_filtering(ds)) { - netdev_for_each_synced_uc_addr(ha, dev) { - dsa_slave_schedule_standalone_work(dev, DSA_UC_DEL, - ha->addr, vid); - } - } - - netif_addr_unlock_bh(dev); - - dsa_flush_workqueue(); - - return 0; -} - -static int dsa_slave_restore_vlan(struct net_device *vdev, int vid, void *arg) -{ - __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q); - - return dsa_slave_vlan_rx_add_vid(arg, proto, vid); -} - -static int dsa_slave_clear_vlan(struct net_device *vdev, int vid, void *arg) -{ - __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q); - - return dsa_slave_vlan_rx_kill_vid(arg, proto, vid); -} - -/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN - * filtering is enabled. The baseline is that only ports that offload a - * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware, - * but there are exceptions for quirky hardware. - * - * If ds->vlan_filtering_is_global = true, then standalone ports which share - * the same switch with other ports that offload a VLAN-aware bridge are also - * inevitably VLAN-aware. 
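The rule stated above (and summarized in the list that follows) can be condensed into a predicate for the standalone case. This is a hypothetical illustration, not a helper from this patch; the field names are those of struct dsa_switch, and "a VLAN-aware bridge spans this chip" is reduced to a boolean parameter for brevity.

static bool standalone_needs_hw_vlans(const struct dsa_switch *ds,
				      bool vlan_aware_bridge_on_chip)
{
	if (ds->needs_standalone_vlan_filtering)
		return true;

	/* Global VLAN filtering leaks bridge awareness to standalone ports */
	return ds->vlan_filtering_is_global && vlan_aware_bridge_on_chip;
}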
- * - * To summarize, a DSA switch port offloads: - * - * - If standalone (this includes software bridge, software LAG): - * - if ds->needs_standalone_vlan_filtering = true, OR if - * (ds->vlan_filtering_is_global = true AND there are bridges spanning - * this switch chip which have vlan_filtering=1) - * - the 8021q upper VLANs - * - else (standalone VLAN filtering is not needed, VLAN filtering is not - * global, or it is, but no port is under a VLAN-aware bridge): - * - no VLAN (any 8021q upper is a software VLAN) - * - * - If under a vlan_filtering=0 bridge which it offload: - * - if ds->configure_vlan_while_not_filtering = true (default): - * - the bridge VLANs. These VLANs are committed to hardware but inactive. - * - else (deprecated): - * - no VLAN. The bridge VLANs are not restored when VLAN awareness is - * enabled, so this behavior is broken and discouraged. - * - * - If under a vlan_filtering=1 bridge which it offload: - * - the bridge VLANs - * - the 8021q upper VLANs - */ -int dsa_slave_manage_vlan_filtering(struct net_device *slave, - bool vlan_filtering) -{ - int err; - - if (vlan_filtering) { - slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - - err = vlan_for_each(slave, dsa_slave_restore_vlan, slave); - if (err) { - vlan_for_each(slave, dsa_slave_clear_vlan, slave); - slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; - return err; - } - } else { - err = vlan_for_each(slave, dsa_slave_clear_vlan, slave); - if (err) - return err; - - slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; - } - - return 0; -} - -struct dsa_hw_port { - struct list_head list; - struct net_device *dev; - int old_mtu; -}; - -static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu) -{ - const struct dsa_hw_port *p; - int err; - - list_for_each_entry(p, hw_port_list, list) { - if (p->dev->mtu == mtu) - continue; - - err = dev_set_mtu(p->dev, mtu); - if (err) - goto rollback; - } - - return 0; - -rollback: - list_for_each_entry_continue_reverse(p, hw_port_list, list) { - if (p->dev->mtu == p->old_mtu) - continue; - - if (dev_set_mtu(p->dev, p->old_mtu)) - netdev_err(p->dev, "Failed to restore MTU\n"); - } - - return err; -} - -static void dsa_hw_port_list_free(struct list_head *hw_port_list) -{ - struct dsa_hw_port *p, *n; - - list_for_each_entry_safe(p, n, hw_port_list, list) - kfree(p); -} - -/* Make the hardware datapath to/from @dev limited to a common MTU */ -static void dsa_bridge_mtu_normalization(struct dsa_port *dp) -{ - struct list_head hw_port_list; - struct dsa_switch_tree *dst; - int min_mtu = ETH_MAX_MTU; - struct dsa_port *other_dp; - int err; - - if (!dp->ds->mtu_enforcement_ingress) - return; - - if (!dp->bridge) - return; - - INIT_LIST_HEAD(&hw_port_list); - - /* Populate the list of ports that are part of the same bridge - * as the newly added/modified port - */ - list_for_each_entry(dst, &dsa_tree_list, list) { - list_for_each_entry(other_dp, &dst->ports, list) { - struct dsa_hw_port *hw_port; - struct net_device *slave; - - if (other_dp->type != DSA_PORT_TYPE_USER) - continue; - - if (!dsa_port_bridge_same(dp, other_dp)) - continue; - - if (!other_dp->ds->mtu_enforcement_ingress) - continue; - - slave = other_dp->slave; - - if (min_mtu > slave->mtu) - min_mtu = slave->mtu; - - hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL); - if (!hw_port) - goto out; - - hw_port->dev = slave; - hw_port->old_mtu = slave->mtu; - - list_add(&hw_port->list, &hw_port_list); - } - } - - /* Attempt to configure the entire hardware bridge to the newly added - * interface's MTU first, 
regardless of whether the intention of the - * user was to raise or lower it. - */ - err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->slave->mtu); - if (!err) - goto out; - - /* Clearly that didn't work out so well, so just set the minimum MTU on - * all hardware bridge ports now. If this fails too, then all ports will - * still have their old MTU rolled back anyway. - */ - dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu); - -out: - dsa_hw_port_list_free(&hw_port_list); -} - -int dsa_slave_change_mtu(struct net_device *dev, int new_mtu) -{ - struct net_device *master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_port *cpu_dp = dp->cpu_dp; - struct dsa_switch *ds = dp->ds; - struct dsa_port *other_dp; - int largest_mtu = 0; - int new_master_mtu; - int old_master_mtu; - int mtu_limit; - int overhead; - int cpu_mtu; - int err; - - if (!ds->ops->port_change_mtu) - return -EOPNOTSUPP; - - dsa_tree_for_each_user_port(other_dp, ds->dst) { - int slave_mtu; - - /* During probe, this function will be called for each slave - * device, while not all of them have been allocated. That's - * ok, it doesn't change what the maximum is, so ignore it. - */ - if (!other_dp->slave) - continue; - - /* Pretend that we already applied the setting, which we - * actually haven't (still haven't done all integrity checks) - */ - if (dp == other_dp) - slave_mtu = new_mtu; - else - slave_mtu = other_dp->slave->mtu; - - if (largest_mtu < slave_mtu) - largest_mtu = slave_mtu; - } - - overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops); - mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead); - old_master_mtu = master->mtu; - new_master_mtu = largest_mtu + overhead; - if (new_master_mtu > mtu_limit) - return -ERANGE; - - /* If the master MTU isn't over limit, there's no need to check the CPU - * MTU, since that surely isn't either. - */ - cpu_mtu = largest_mtu; - - /* Start applying stuff */ - if (new_master_mtu != old_master_mtu) { - err = dev_set_mtu(master, new_master_mtu); - if (err < 0) - goto out_master_failed; - - /* We only need to propagate the MTU of the CPU port to - * upstream switches, so emit a notifier which updates them. 
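The MTU computation above reduces to a small calculation. As a worked example with an assumed 8-byte tag overhead (the value is tagger-specific): a largest user-port MTU of 1500 requires a conduit MTU of 1508, accepted only while it fits min(master->max_mtu, dev->max_mtu + overhead). A sketch:

#include <linux/errno.h>
#include <linux/minmax.h>

static int conduit_mtu_sketch(int largest_user_mtu, int overhead,
			      int conduit_max_mtu, int user_max_mtu)
{
	int mtu_limit = min(conduit_max_mtu, user_max_mtu + overhead);
	int new_conduit_mtu = largest_user_mtu + overhead;

	return new_conduit_mtu > mtu_limit ? -ERANGE : new_conduit_mtu;
}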
- */ - err = dsa_port_mtu_change(cpu_dp, cpu_mtu); - if (err) - goto out_cpu_failed; - } - - err = ds->ops->port_change_mtu(ds, dp->index, new_mtu); - if (err) - goto out_port_failed; - - dev->mtu = new_mtu; - - dsa_bridge_mtu_normalization(dp); - - return 0; - -out_port_failed: - if (new_master_mtu != old_master_mtu) - dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead); -out_cpu_failed: - if (new_master_mtu != old_master_mtu) - dev_set_mtu(master, old_master_mtu); -out_master_failed: - return err; -} - -static int __maybe_unused -dsa_slave_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - unsigned long mask, new_prio; - int err, port = dp->index; - - if (!ds->ops->port_set_default_prio) - return -EOPNOTSUPP; - - err = dcb_ieee_setapp(dev, app); - if (err) - return err; - - mask = dcb_ieee_getapp_mask(dev, app); - new_prio = __fls(mask); - - err = ds->ops->port_set_default_prio(ds, port, new_prio); - if (err) { - dcb_ieee_delapp(dev, app); - return err; - } - - return 0; -} - -static int __maybe_unused -dsa_slave_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - unsigned long mask, new_prio; - int err, port = dp->index; - u8 dscp = app->protocol; - - if (!ds->ops->port_add_dscp_prio) - return -EOPNOTSUPP; - - if (dscp >= 64) { - netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n", - dscp); - return -EINVAL; - } - - err = dcb_ieee_setapp(dev, app); - if (err) - return err; - - mask = dcb_ieee_getapp_mask(dev, app); - new_prio = __fls(mask); - - err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio); - if (err) { - dcb_ieee_delapp(dev, app); - return err; - } - - return 0; -} - -static int __maybe_unused dsa_slave_dcbnl_ieee_setapp(struct net_device *dev, - struct dcb_app *app) -{ - switch (app->selector) { - case IEEE_8021QAZ_APP_SEL_ETHERTYPE: - switch (app->protocol) { - case 0: - return dsa_slave_dcbnl_set_default_prio(dev, app); - default: - return -EOPNOTSUPP; - } - break; - case IEEE_8021QAZ_APP_SEL_DSCP: - return dsa_slave_dcbnl_add_dscp_prio(dev, app); - default: - return -EOPNOTSUPP; - } -} - -static int __maybe_unused -dsa_slave_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - unsigned long mask, new_prio; - int err, port = dp->index; - - if (!ds->ops->port_set_default_prio) - return -EOPNOTSUPP; - - err = dcb_ieee_delapp(dev, app); - if (err) - return err; - - mask = dcb_ieee_getapp_mask(dev, app); - new_prio = mask ? 
__fls(mask) : 0; - - err = ds->ops->port_set_default_prio(ds, port, new_prio); - if (err) { - dcb_ieee_setapp(dev, app); - return err; - } - - return 0; -} - -static int __maybe_unused -dsa_slave_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int err, port = dp->index; - u8 dscp = app->protocol; - - if (!ds->ops->port_del_dscp_prio) - return -EOPNOTSUPP; - - err = dcb_ieee_delapp(dev, app); - if (err) - return err; - - err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority); - if (err) { - dcb_ieee_setapp(dev, app); - return err; - } - - return 0; -} - -static int __maybe_unused dsa_slave_dcbnl_ieee_delapp(struct net_device *dev, - struct dcb_app *app) -{ - switch (app->selector) { - case IEEE_8021QAZ_APP_SEL_ETHERTYPE: - switch (app->protocol) { - case 0: - return dsa_slave_dcbnl_del_default_prio(dev, app); - default: - return -EOPNOTSUPP; - } - break; - case IEEE_8021QAZ_APP_SEL_DSCP: - return dsa_slave_dcbnl_del_dscp_prio(dev, app); - default: - return -EOPNOTSUPP; - } -} - -/* Pre-populate the DCB application priority table with the priorities - * configured during switch setup, which we read from hardware here. - */ -static int dsa_slave_dcbnl_init(struct net_device *dev) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - int port = dp->index; - int err; - - if (ds->ops->port_get_default_prio) { - int prio = ds->ops->port_get_default_prio(ds, port); - struct dcb_app app = { - .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, - .protocol = 0, - .priority = prio, - }; - - if (prio < 0) - return prio; - - err = dcb_ieee_setapp(dev, &app); - if (err) - return err; - } - - if (ds->ops->port_get_dscp_prio) { - int protocol; - - for (protocol = 0; protocol < 64; protocol++) { - struct dcb_app app = { - .selector = IEEE_8021QAZ_APP_SEL_DSCP, - .protocol = protocol, - }; - int prio; - - prio = ds->ops->port_get_dscp_prio(ds, port, protocol); - if (prio == -EOPNOTSUPP) - continue; - if (prio < 0) - return prio; - - app.priority = prio; - - err = dcb_ieee_setapp(dev, &app); - if (err) - return err; - } - } - - return 0; -} - -static const struct ethtool_ops dsa_slave_ethtool_ops = { - .get_drvinfo = dsa_slave_get_drvinfo, - .get_regs_len = dsa_slave_get_regs_len, - .get_regs = dsa_slave_get_regs, - .nway_reset = dsa_slave_nway_reset, - .get_link = ethtool_op_get_link, - .get_eeprom_len = dsa_slave_get_eeprom_len, - .get_eeprom = dsa_slave_get_eeprom, - .set_eeprom = dsa_slave_set_eeprom, - .get_strings = dsa_slave_get_strings, - .get_ethtool_stats = dsa_slave_get_ethtool_stats, - .get_sset_count = dsa_slave_get_sset_count, - .get_eth_phy_stats = dsa_slave_get_eth_phy_stats, - .get_eth_mac_stats = dsa_slave_get_eth_mac_stats, - .get_eth_ctrl_stats = dsa_slave_get_eth_ctrl_stats, - .get_rmon_stats = dsa_slave_get_rmon_stats, - .set_wol = dsa_slave_set_wol, - .get_wol = dsa_slave_get_wol, - .set_eee = dsa_slave_set_eee, - .get_eee = dsa_slave_get_eee, - .get_link_ksettings = dsa_slave_get_link_ksettings, - .set_link_ksettings = dsa_slave_set_link_ksettings, - .get_pause_stats = dsa_slave_get_pause_stats, - .get_pauseparam = dsa_slave_get_pauseparam, - .set_pauseparam = dsa_slave_set_pauseparam, - .get_rxnfc = dsa_slave_get_rxnfc, - .set_rxnfc = dsa_slave_set_rxnfc, - .get_ts_info = dsa_slave_get_ts_info, - .self_test = dsa_slave_net_selftest, - .get_mm = dsa_slave_get_mm, - .set_mm = dsa_slave_set_mm, - .get_mm_stats = dsa_slave_get_mm_stats, -}; - -static 
const struct dcbnl_rtnl_ops __maybe_unused dsa_slave_dcbnl_ops = { - .ieee_setapp = dsa_slave_dcbnl_ieee_setapp, - .ieee_delapp = dsa_slave_dcbnl_ieee_delapp, -}; - -static void dsa_slave_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *s) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - - if (ds->ops->get_stats64) - ds->ops->get_stats64(ds, dp->index, s); - else - dev_get_tstats64(dev, s); -} - -static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx, - struct net_device_path *path) -{ - struct dsa_port *dp = dsa_slave_to_port(ctx->dev); - struct net_device *master = dsa_port_to_master(dp); - struct dsa_port *cpu_dp = dp->cpu_dp; - - path->dev = ctx->dev; - path->type = DEV_PATH_DSA; - path->dsa.proto = cpu_dp->tag_ops->proto; - path->dsa.port = dp->index; - ctx->dev = master; - - return 0; -} - -static const struct net_device_ops dsa_slave_netdev_ops = { - .ndo_open = dsa_slave_open, - .ndo_stop = dsa_slave_close, - .ndo_start_xmit = dsa_slave_xmit, - .ndo_change_rx_flags = dsa_slave_change_rx_flags, - .ndo_set_rx_mode = dsa_slave_set_rx_mode, - .ndo_set_mac_address = dsa_slave_set_mac_address, - .ndo_fdb_dump = dsa_slave_fdb_dump, - .ndo_eth_ioctl = dsa_slave_ioctl, - .ndo_get_iflink = dsa_slave_get_iflink, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_netpoll_setup = dsa_slave_netpoll_setup, - .ndo_netpoll_cleanup = dsa_slave_netpoll_cleanup, - .ndo_poll_controller = dsa_slave_poll_controller, -#endif - .ndo_setup_tc = dsa_slave_setup_tc, - .ndo_get_stats64 = dsa_slave_get_stats64, - .ndo_vlan_rx_add_vid = dsa_slave_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = dsa_slave_vlan_rx_kill_vid, - .ndo_change_mtu = dsa_slave_change_mtu, - .ndo_fill_forward_path = dsa_slave_fill_forward_path, -}; - -static struct device_type dsa_type = { - .name = "dsa", -}; - -void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up) -{ - const struct dsa_port *dp = dsa_to_port(ds, port); - - if (dp->pl) - phylink_mac_change(dp->pl, up); -} -EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change); - -static void dsa_slave_phylink_fixed_state(struct phylink_config *config, - struct phylink_link_state *state) -{ - struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); - struct dsa_switch *ds = dp->ds; - - /* No need to check that this operation is valid, the callback would - * not be called if it was not. - */ - ds->ops->phylink_fixed_state(ds, dp->index, state); -} - -/* slave device setup *******************************************************/ -static int dsa_slave_phy_connect(struct net_device *slave_dev, int addr, - u32 flags) -{ - struct dsa_port *dp = dsa_slave_to_port(slave_dev); - struct dsa_switch *ds = dp->ds; - - slave_dev->phydev = mdiobus_get_phy(ds->slave_mii_bus, addr); - if (!slave_dev->phydev) { - netdev_err(slave_dev, "no phy at %d\n", addr); - return -ENODEV; - } - - slave_dev->phydev->dev_flags |= flags; - - return phylink_connect_phy(dp->pl, slave_dev->phydev); -} - -static int dsa_slave_phy_setup(struct net_device *slave_dev) -{ - struct dsa_port *dp = dsa_slave_to_port(slave_dev); - struct device_node *port_dn = dp->dn; - struct dsa_switch *ds = dp->ds; - u32 phy_flags = 0; - int ret; - - dp->pl_config.dev = &slave_dev->dev; - dp->pl_config.type = PHYLINK_NETDEV; - - /* The get_fixed_state callback takes precedence over polling the - * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set - * this if the switch provides such a callback. 
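
For switches that do provide that callback, the driver side looks
roughly like the sketch below; the "foo_" name and the fixed
1G/full-duplex values are illustrative assumptions, not taken from
this patch:

static void foo_phylink_fixed_state(struct dsa_switch *ds, int port,
                                    struct phylink_link_state *state)
{
        /* e.g. an internally wired port pinned at 1Gbps full duplex */
        state->link = 1;
        state->speed = SPEED_1000;
        state->duplex = DUPLEX_FULL;
}
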
- */ - if (ds->ops->phylink_fixed_state) { - dp->pl_config.get_fixed_state = dsa_slave_phylink_fixed_state; - dp->pl_config.poll_fixed_state = true; - } - - ret = dsa_port_phylink_create(dp); - if (ret) - return ret; - - if (ds->ops->get_phy_flags) - phy_flags = ds->ops->get_phy_flags(ds, dp->index); - - ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags); - if (ret == -ENODEV && ds->slave_mii_bus) { - /* We could not connect to a designated PHY or SFP, so try to - * use the switch internal MDIO bus instead - */ - ret = dsa_slave_phy_connect(slave_dev, dp->index, phy_flags); - } - if (ret) { - netdev_err(slave_dev, "failed to connect to PHY: %pe\n", - ERR_PTR(ret)); - dsa_port_phylink_destroy(dp); - } - - return ret; -} - -void dsa_slave_setup_tagger(struct net_device *slave) -{ - struct dsa_port *dp = dsa_slave_to_port(slave); - struct net_device *master = dsa_port_to_master(dp); - struct dsa_slave_priv *p = netdev_priv(slave); - const struct dsa_port *cpu_dp = dp->cpu_dp; - const struct dsa_switch *ds = dp->ds; - - slave->needed_headroom = cpu_dp->tag_ops->needed_headroom; - slave->needed_tailroom = cpu_dp->tag_ops->needed_tailroom; - /* Try to save one extra realloc later in the TX path (in the master) - * by also inheriting the master's needed headroom and tailroom. - * The 8021q driver also does this. - */ - slave->needed_headroom += master->needed_headroom; - slave->needed_tailroom += master->needed_tailroom; - - p->xmit = cpu_dp->tag_ops->xmit; - - slave->features = master->vlan_features | NETIF_F_HW_TC; - slave->hw_features |= NETIF_F_HW_TC; - slave->features |= NETIF_F_LLTX; - if (slave->needed_tailroom) - slave->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST); - if (ds->needs_standalone_vlan_filtering) - slave->features |= NETIF_F_HW_VLAN_CTAG_FILTER; -} - -int dsa_slave_suspend(struct net_device *slave_dev) -{ - struct dsa_port *dp = dsa_slave_to_port(slave_dev); - - if (!netif_running(slave_dev)) - return 0; - - netif_device_detach(slave_dev); - - rtnl_lock(); - phylink_stop(dp->pl); - rtnl_unlock(); - - return 0; -} - -int dsa_slave_resume(struct net_device *slave_dev) -{ - struct dsa_port *dp = dsa_slave_to_port(slave_dev); - - if (!netif_running(slave_dev)) - return 0; - - netif_device_attach(slave_dev); - - rtnl_lock(); - phylink_start(dp->pl); - rtnl_unlock(); - - return 0; -} - -int dsa_slave_create(struct dsa_port *port) -{ - struct net_device *master = dsa_port_to_master(port); - struct dsa_switch *ds = port->ds; - struct net_device *slave_dev; - struct dsa_slave_priv *p; - const char *name; - int assign_type; - int ret; - - if (!ds->num_tx_queues) - ds->num_tx_queues = 1; - - if (port->name) { - name = port->name; - assign_type = NET_NAME_PREDICTABLE; - } else { - name = "eth%d"; - assign_type = NET_NAME_ENUM; - } - - slave_dev = alloc_netdev_mqs(sizeof(struct dsa_slave_priv), name, - assign_type, ether_setup, - ds->num_tx_queues, 1); - if (slave_dev == NULL) - return -ENOMEM; - - slave_dev->rtnl_link_ops = &dsa_link_ops; - slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; -#if IS_ENABLED(CONFIG_DCB) - slave_dev->dcbnl_ops = &dsa_slave_dcbnl_ops; -#endif - if (!is_zero_ether_addr(port->mac)) - eth_hw_addr_set(slave_dev, port->mac); - else - eth_hw_addr_inherit(slave_dev, master); - slave_dev->priv_flags |= IFF_NO_QUEUE; - if (dsa_switch_supports_uc_filtering(ds)) - slave_dev->priv_flags |= IFF_UNICAST_FLT; - slave_dev->netdev_ops = &dsa_slave_netdev_ops; - if (ds->ops->port_max_mtu) - slave_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index); - 
SET_NETDEV_DEVTYPE(slave_dev, &dsa_type); - - SET_NETDEV_DEV(slave_dev, port->ds->dev); - SET_NETDEV_DEVLINK_PORT(slave_dev, &port->devlink_port); - slave_dev->dev.of_node = port->dn; - slave_dev->vlan_features = master->vlan_features; - - p = netdev_priv(slave_dev); - slave_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!slave_dev->tstats) { - free_netdev(slave_dev); - return -ENOMEM; - } - - ret = gro_cells_init(&p->gcells, slave_dev); - if (ret) - goto out_free; - - p->dp = port; - INIT_LIST_HEAD(&p->mall_tc_list); - port->slave = slave_dev; - dsa_slave_setup_tagger(slave_dev); - - netif_carrier_off(slave_dev); - - ret = dsa_slave_phy_setup(slave_dev); - if (ret) { - netdev_err(slave_dev, - "error %d setting up PHY for tree %d, switch %d, port %d\n", - ret, ds->dst->index, ds->index, port->index); - goto out_gcells; - } - - rtnl_lock(); - - ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN); - if (ret && ret != -EOPNOTSUPP) - dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", - ret, ETH_DATA_LEN, port->index); - - ret = register_netdevice(slave_dev); - if (ret) { - netdev_err(master, "error %d registering interface %s\n", - ret, slave_dev->name); - rtnl_unlock(); - goto out_phy; - } - - if (IS_ENABLED(CONFIG_DCB)) { - ret = dsa_slave_dcbnl_init(slave_dev); - if (ret) { - netdev_err(slave_dev, - "failed to initialize DCB: %pe\n", - ERR_PTR(ret)); - rtnl_unlock(); - goto out_unregister; - } - } - - ret = netdev_upper_dev_link(master, slave_dev, NULL); - - rtnl_unlock(); - - if (ret) - goto out_unregister; - - return 0; - -out_unregister: - unregister_netdev(slave_dev); -out_phy: - rtnl_lock(); - phylink_disconnect_phy(p->dp->pl); - rtnl_unlock(); - dsa_port_phylink_destroy(p->dp); -out_gcells: - gro_cells_destroy(&p->gcells); -out_free: - free_percpu(slave_dev->tstats); - free_netdev(slave_dev); - port->slave = NULL; - return ret; -} - -void dsa_slave_destroy(struct net_device *slave_dev) -{ - struct net_device *master = dsa_slave_to_master(slave_dev); - struct dsa_port *dp = dsa_slave_to_port(slave_dev); - struct dsa_slave_priv *p = netdev_priv(slave_dev); - - netif_carrier_off(slave_dev); - rtnl_lock(); - netdev_upper_dev_unlink(master, slave_dev); - unregister_netdevice(slave_dev); - phylink_disconnect_phy(dp->pl); - rtnl_unlock(); - - dsa_port_phylink_destroy(dp); - gro_cells_destroy(&p->gcells); - free_percpu(slave_dev->tstats); - free_netdev(slave_dev); -} - -int dsa_slave_change_master(struct net_device *dev, struct net_device *master, - struct netlink_ext_ack *extack) -{ - struct net_device *old_master = dsa_slave_to_master(dev); - struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch *ds = dp->ds; - struct net_device *upper; - struct list_head *iter; - int err; - - if (master == old_master) - return 0; - - if (!ds->ops->port_change_master) { - NL_SET_ERR_MSG_MOD(extack, - "Driver does not support changing DSA master"); - return -EOPNOTSUPP; - } - - if (!netdev_uses_dsa(master)) { - NL_SET_ERR_MSG_MOD(extack, - "Interface not eligible as DSA master"); - return -EOPNOTSUPP; - } - - netdev_for_each_upper_dev_rcu(master, upper, iter) { - if (dsa_slave_dev_check(upper)) - continue; - if (netif_is_bridge_master(upper)) - continue; - NL_SET_ERR_MSG_MOD(extack, "Cannot join master with unknown uppers"); - return -EOPNOTSUPP; - } - - /* Since we allow live-changing the DSA master, plus we auto-open the - * DSA master when the user port opens => we need to ensure that the - * new DSA master is open too. 
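
The ds->ops->port_change_master gate earlier in this function implies
a driver-side op that retargets the user port in hardware. A sketch of
what an implementation could look like; foo_set_upstream_port() and
the priv layout are assumptions, only the master->dsa_ptr lookup is
taken from this file:

static int foo_port_change_master(struct dsa_switch *ds, int port,
                                  struct net_device *master,
                                  struct netlink_ext_ack *extack)
{
        struct dsa_port *cpu_dp = master->dsa_ptr;

        /* point the user port's upstream at the new CPU port */
        return foo_set_upstream_port(ds->priv, port, cpu_dp->index);
}
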
- */ - if (dev->flags & IFF_UP) { - err = dev_open(master, extack); - if (err) - return err; - } - - netdev_upper_dev_unlink(old_master, dev); - - err = netdev_upper_dev_link(master, dev, extack); - if (err) - goto out_revert_old_master_unlink; - - err = dsa_port_change_master(dp, master, extack); - if (err) - goto out_revert_master_link; - - /* Update the MTU of the new CPU port through cross-chip notifiers */ - err = dsa_slave_change_mtu(dev, dev->mtu); - if (err && err != -EOPNOTSUPP) { - netdev_warn(dev, - "nonfatal error updating MTU with new master: %pe\n", - ERR_PTR(err)); - } - - /* If the port doesn't have its own MAC address and relies on the DSA - * master's one, inherit it again from the new DSA master. - */ - if (is_zero_ether_addr(dp->mac)) - eth_hw_addr_inherit(dev, master); - - return 0; - -out_revert_master_link: - netdev_upper_dev_unlink(master, dev); -out_revert_old_master_unlink: - netdev_upper_dev_link(old_master, dev, NULL); - return err; -} - -bool dsa_slave_dev_check(const struct net_device *dev) -{ - return dev->netdev_ops == &dsa_slave_netdev_ops; -} -EXPORT_SYMBOL_GPL(dsa_slave_dev_check); - -static int dsa_slave_changeupper(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct netlink_ext_ack *extack; - int err = NOTIFY_DONE; - - if (!dsa_slave_dev_check(dev)) - return err; - - extack = netdev_notifier_info_to_extack(&info->info); - - if (netif_is_bridge_master(info->upper_dev)) { - if (info->linking) { - err = dsa_port_bridge_join(dp, info->upper_dev, extack); - if (!err) - dsa_bridge_mtu_normalization(dp); - if (err == -EOPNOTSUPP) { - NL_SET_ERR_MSG_WEAK_MOD(extack, - "Offloading not supported"); - err = 0; - } - err = notifier_from_errno(err); - } else { - dsa_port_bridge_leave(dp, info->upper_dev); - err = NOTIFY_OK; - } - } else if (netif_is_lag_master(info->upper_dev)) { - if (info->linking) { - err = dsa_port_lag_join(dp, info->upper_dev, - info->upper_info, extack); - if (err == -EOPNOTSUPP) { - NL_SET_ERR_MSG_WEAK_MOD(extack, - "Offloading not supported"); - err = 0; - } - err = notifier_from_errno(err); - } else { - dsa_port_lag_leave(dp, info->upper_dev); - err = NOTIFY_OK; - } - } else if (is_hsr_master(info->upper_dev)) { - if (info->linking) { - err = dsa_port_hsr_join(dp, info->upper_dev, extack); - if (err == -EOPNOTSUPP) { - NL_SET_ERR_MSG_WEAK_MOD(extack, - "Offloading not supported"); - err = 0; - } - err = notifier_from_errno(err); - } else { - dsa_port_hsr_leave(dp, info->upper_dev); - err = NOTIFY_OK; - } - } - - return err; -} - -static int dsa_slave_prechangeupper(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - - if (!dsa_slave_dev_check(dev)) - return NOTIFY_DONE; - - if (netif_is_bridge_master(info->upper_dev) && !info->linking) - dsa_port_pre_bridge_leave(dp, info->upper_dev); - else if (netif_is_lag_master(info->upper_dev) && !info->linking) - dsa_port_pre_lag_leave(dp, info->upper_dev); - /* dsa_port_pre_hsr_leave is not yet necessary since hsr cannot be - * meaningfully enslaved to a bridge yet - */ - - return NOTIFY_DONE; -} - -static int -dsa_slave_lag_changeupper(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct net_device *lower; - struct list_head *iter; - int err = NOTIFY_DONE; - struct dsa_port *dp; - - if (!netif_is_lag_master(dev)) - return err; - - netdev_for_each_lower_dev(dev, lower, iter) { - if (!dsa_slave_dev_check(lower)) - 
continue; - - dp = dsa_slave_to_port(lower); - if (!dp->lag) - /* Software LAG */ - continue; - - err = dsa_slave_changeupper(lower, info); - if (notifier_to_errno(err)) - break; - } - - return err; -} - -/* Same as dsa_slave_lag_changeupper() except that it calls - * dsa_slave_prechangeupper() - */ -static int -dsa_slave_lag_prechangeupper(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct net_device *lower; - struct list_head *iter; - int err = NOTIFY_DONE; - struct dsa_port *dp; - - if (!netif_is_lag_master(dev)) - return err; - - netdev_for_each_lower_dev(dev, lower, iter) { - if (!dsa_slave_dev_check(lower)) - continue; - - dp = dsa_slave_to_port(lower); - if (!dp->lag) - /* Software LAG */ - continue; - - err = dsa_slave_prechangeupper(lower, info); - if (notifier_to_errno(err)) - break; - } - - return err; -} - -static int -dsa_prevent_bridging_8021q_upper(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct netlink_ext_ack *ext_ack; - struct net_device *slave, *br; - struct dsa_port *dp; - - ext_ack = netdev_notifier_info_to_extack(&info->info); - - if (!is_vlan_dev(dev)) - return NOTIFY_DONE; - - slave = vlan_dev_real_dev(dev); - if (!dsa_slave_dev_check(slave)) - return NOTIFY_DONE; - - dp = dsa_slave_to_port(slave); - br = dsa_port_bridge_dev_get(dp); - if (!br) - return NOTIFY_DONE; - - /* Deny enslaving a VLAN device into a VLAN-aware bridge */ - if (br_vlan_enabled(br) && - netif_is_bridge_master(info->upper_dev) && info->linking) { - NL_SET_ERR_MSG_MOD(ext_ack, - "Cannot enslave VLAN device into VLAN aware bridge"); - return notifier_from_errno(-EINVAL); - } - - return NOTIFY_DONE; -} - -static int -dsa_slave_check_8021q_upper(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - struct net_device *br = dsa_port_bridge_dev_get(dp); - struct bridge_vlan_info br_info; - struct netlink_ext_ack *extack; - int err = NOTIFY_DONE; - u16 vid; - - if (!br || !br_vlan_enabled(br)) - return NOTIFY_DONE; - - extack = netdev_notifier_info_to_extack(&info->info); - vid = vlan_dev_vlan_id(info->upper_dev); - - /* br_vlan_get_info() returns -EINVAL or -ENOENT if the - * device, respectively the VID is not found, returning - * 0 means success, which is a failure for us here. - */ - err = br_vlan_get_info(br, vid, &br_info); - if (err == 0) { - NL_SET_ERR_MSG_MOD(extack, - "This VLAN is already configured by the bridge"); - return notifier_from_errno(-EBUSY); - } - - return NOTIFY_DONE; -} - -static int -dsa_slave_prechangeupper_sanity_check(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct dsa_switch *ds; - struct dsa_port *dp; - int err; - - if (!dsa_slave_dev_check(dev)) - return dsa_prevent_bridging_8021q_upper(dev, info); - - dp = dsa_slave_to_port(dev); - ds = dp->ds; - - if (ds->ops->port_prechangeupper) { - err = ds->ops->port_prechangeupper(ds, dp->index, info); - if (err) - return notifier_from_errno(err); - } - - if (is_vlan_dev(info->upper_dev)) - return dsa_slave_check_8021q_upper(dev, info); - - return NOTIFY_DONE; -} - -/* To be eligible as a DSA master, a LAG must have all lower interfaces be - * eligible DSA masters. Additionally, all LAG slaves must be DSA masters of - * switches in the same switch tree. 
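
dsa_lag_master_validate() just below enforces this rule pairwise over
the LAG lowers. The rule itself can be modeled standalone; the struct
and its fields here are illustrative only:

#include <stdbool.h>
#include <stdio.h>

struct lower {
        bool uses_dsa;  /* netdev_uses_dsa() in the kernel */
        int tree;       /* what dsa_port_tree_same() compares */
};

static bool lag_lowers_valid(const struct lower *l, int n)
{
        for (int i = 0; i < n; i++)
                for (int j = 0; j < n; j++) {
                        if (!l[i].uses_dsa || !l[j].uses_dsa)
                                return false;   /* -EINVAL */
                        if (i != j && l[i].tree != l[j].tree)
                                return false;   /* disjoint trees */
                }
        return true;
}

int main(void)
{
        struct lower same[] = { { true, 0 }, { true, 0 } };
        struct lower disjoint[] = { { true, 0 }, { true, 1 } };

        printf("%d %d\n", lag_lowers_valid(same, 2),
               lag_lowers_valid(disjoint, 2));
        return 0;
}
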
- */ -static int dsa_lag_master_validate(struct net_device *lag_dev, - struct netlink_ext_ack *extack) -{ - struct net_device *lower1, *lower2; - struct list_head *iter1, *iter2; - - netdev_for_each_lower_dev(lag_dev, lower1, iter1) { - netdev_for_each_lower_dev(lag_dev, lower2, iter2) { - if (!netdev_uses_dsa(lower1) || - !netdev_uses_dsa(lower2)) { - NL_SET_ERR_MSG_MOD(extack, - "All LAG ports must be eligible as DSA masters"); - return notifier_from_errno(-EINVAL); - } - - if (lower1 == lower2) - continue; - - if (!dsa_port_tree_same(lower1->dsa_ptr, - lower2->dsa_ptr)) { - NL_SET_ERR_MSG_MOD(extack, - "LAG contains DSA masters of disjoint switch trees"); - return notifier_from_errno(-EINVAL); - } - } - } - - return NOTIFY_DONE; -} - -static int -dsa_master_prechangeupper_sanity_check(struct net_device *master, - struct netdev_notifier_changeupper_info *info) -{ - struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info); - - if (!netdev_uses_dsa(master)) - return NOTIFY_DONE; - - if (!info->linking) - return NOTIFY_DONE; - - /* Allow DSA switch uppers */ - if (dsa_slave_dev_check(info->upper_dev)) - return NOTIFY_DONE; - - /* Allow bridge uppers of DSA masters, subject to further - * restrictions in dsa_bridge_prechangelower_sanity_check() - */ - if (netif_is_bridge_master(info->upper_dev)) - return NOTIFY_DONE; - - /* Allow LAG uppers, subject to further restrictions in - * dsa_lag_master_prechangelower_sanity_check() - */ - if (netif_is_lag_master(info->upper_dev)) - return dsa_lag_master_validate(info->upper_dev, extack); - - NL_SET_ERR_MSG_MOD(extack, - "DSA master cannot join unknown upper interfaces"); - return notifier_from_errno(-EBUSY); -} - -static int -dsa_lag_master_prechangelower_sanity_check(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info); - struct net_device *lag_dev = info->upper_dev; - struct net_device *lower; - struct list_head *iter; - - if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev)) - return NOTIFY_DONE; - - if (!info->linking) - return NOTIFY_DONE; - - if (!netdev_uses_dsa(dev)) { - NL_SET_ERR_MSG(extack, - "Only DSA masters can join a LAG DSA master"); - return notifier_from_errno(-EINVAL); - } - - netdev_for_each_lower_dev(lag_dev, lower, iter) { - if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) { - NL_SET_ERR_MSG(extack, - "Interface is DSA master for a different switch tree than this LAG"); - return notifier_from_errno(-EINVAL); - } - - break; - } - - return NOTIFY_DONE; -} - -/* Don't allow bridging of DSA masters, since the bridge layer rx_handler - * prevents the DSA fake ethertype handler to be invoked, so we don't get the - * chance to strip off and parse the DSA switch tag protocol header (the bridge - * layer just returns RX_HANDLER_CONSUMED, stopping RX processing for these - * frames). - * The only case where that would not be an issue is when bridging can already - * be offloaded, such as when the DSA master is itself a DSA or plain switchdev - * port, and is bridged only with other ports from the same hardware device. 
- */ -static int -dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower, - struct netdev_notifier_changeupper_info *info) -{ - struct net_device *br = info->upper_dev; - struct netlink_ext_ack *extack; - struct net_device *lower; - struct list_head *iter; - - if (!netif_is_bridge_master(br)) - return NOTIFY_DONE; - - if (!info->linking) - return NOTIFY_DONE; - - extack = netdev_notifier_info_to_extack(&info->info); - - netdev_for_each_lower_dev(br, lower, iter) { - if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower)) - continue; - - if (!netdev_port_same_parent_id(lower, new_lower)) { - NL_SET_ERR_MSG(extack, - "Cannot do software bridging with a DSA master"); - return notifier_from_errno(-EINVAL); - } - } - - return NOTIFY_DONE; -} - -static void dsa_tree_migrate_ports_from_lag_master(struct dsa_switch_tree *dst, - struct net_device *lag_dev) -{ - struct net_device *new_master = dsa_tree_find_first_master(dst); - struct dsa_port *dp; - int err; - - dsa_tree_for_each_user_port(dp, dst) { - if (dsa_port_to_master(dp) != lag_dev) - continue; - - err = dsa_slave_change_master(dp->slave, new_master, NULL); - if (err) { - netdev_err(dp->slave, - "failed to restore master to %s: %pe\n", - new_master->name, ERR_PTR(err)); - } - } -} - -static int dsa_master_lag_join(struct net_device *master, - struct net_device *lag_dev, - struct netdev_lag_upper_info *uinfo, - struct netlink_ext_ack *extack) -{ - struct dsa_port *cpu_dp = master->dsa_ptr; - struct dsa_switch_tree *dst = cpu_dp->dst; - struct dsa_port *dp; - int err; - - err = dsa_master_lag_setup(lag_dev, cpu_dp, uinfo, extack); - if (err) - return err; - - dsa_tree_for_each_user_port(dp, dst) { - if (dsa_port_to_master(dp) != master) - continue; - - err = dsa_slave_change_master(dp->slave, lag_dev, extack); - if (err) - goto restore; - } - - return 0; - -restore: - dsa_tree_for_each_user_port_continue_reverse(dp, dst) { - if (dsa_port_to_master(dp) != lag_dev) - continue; - - err = dsa_slave_change_master(dp->slave, master, NULL); - if (err) { - netdev_err(dp->slave, - "failed to restore master to %s: %pe\n", - master->name, ERR_PTR(err)); - } - } - - dsa_master_lag_teardown(lag_dev, master->dsa_ptr); - - return err; -} - -static void dsa_master_lag_leave(struct net_device *master, - struct net_device *lag_dev) -{ - struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr; - struct dsa_switch_tree *dst = cpu_dp->dst; - struct dsa_port *new_cpu_dp = NULL; - struct net_device *lower; - struct list_head *iter; - - netdev_for_each_lower_dev(lag_dev, lower, iter) { - if (netdev_uses_dsa(lower)) { - new_cpu_dp = lower->dsa_ptr; - break; - } - } - - if (new_cpu_dp) { - /* Update the CPU port of the user ports still under the LAG - * so that dsa_port_to_master() continues to work properly - */ - dsa_tree_for_each_user_port(dp, dst) - if (dsa_port_to_master(dp) == lag_dev) - dp->cpu_dp = new_cpu_dp; - - /* Update the index of the virtual CPU port to match the lowest - * physical CPU port - */ - lag_dev->dsa_ptr = new_cpu_dp; - wmb(); - } else { - /* If the LAG DSA master has no ports left, migrate back all - * user ports to the first physical CPU port - */ - dsa_tree_migrate_ports_from_lag_master(dst, lag_dev); - } - - /* This DSA master has left its LAG in any case, so let - * the CPU port leave the hardware LAG as well - */ - dsa_master_lag_teardown(lag_dev, master->dsa_ptr); -} - -static int dsa_master_changeupper(struct net_device *dev, - struct netdev_notifier_changeupper_info *info) -{ - struct netlink_ext_ack *extack; - int err = 
NOTIFY_DONE; - - if (!netdev_uses_dsa(dev)) - return err; - - extack = netdev_notifier_info_to_extack(&info->info); - - if (netif_is_lag_master(info->upper_dev)) { - if (info->linking) { - err = dsa_master_lag_join(dev, info->upper_dev, - info->upper_info, extack); - err = notifier_from_errno(err); - } else { - dsa_master_lag_leave(dev, info->upper_dev); - err = NOTIFY_OK; - } - } - - return err; -} - -static int dsa_slave_netdevice_event(struct notifier_block *nb, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - - switch (event) { - case NETDEV_PRECHANGEUPPER: { - struct netdev_notifier_changeupper_info *info = ptr; - int err; - - err = dsa_slave_prechangeupper_sanity_check(dev, info); - if (notifier_to_errno(err)) - return err; - - err = dsa_master_prechangeupper_sanity_check(dev, info); - if (notifier_to_errno(err)) - return err; - - err = dsa_lag_master_prechangelower_sanity_check(dev, info); - if (notifier_to_errno(err)) - return err; - - err = dsa_bridge_prechangelower_sanity_check(dev, info); - if (notifier_to_errno(err)) - return err; - - err = dsa_slave_prechangeupper(dev, ptr); - if (notifier_to_errno(err)) - return err; - - err = dsa_slave_lag_prechangeupper(dev, ptr); - if (notifier_to_errno(err)) - return err; - - break; - } - case NETDEV_CHANGEUPPER: { - int err; - - err = dsa_slave_changeupper(dev, ptr); - if (notifier_to_errno(err)) - return err; - - err = dsa_slave_lag_changeupper(dev, ptr); - if (notifier_to_errno(err)) - return err; - - err = dsa_master_changeupper(dev, ptr); - if (notifier_to_errno(err)) - return err; - - break; - } - case NETDEV_CHANGELOWERSTATE: { - struct netdev_notifier_changelowerstate_info *info = ptr; - struct dsa_port *dp; - int err = 0; - - if (dsa_slave_dev_check(dev)) { - dp = dsa_slave_to_port(dev); - - err = dsa_port_lag_change(dp, info->lower_state_info); - } - - /* Mirror LAG port events on DSA masters that are in - * a LAG towards their respective switch CPU ports - */ - if (netdev_uses_dsa(dev)) { - dp = dev->dsa_ptr; - - err = dsa_port_lag_change(dp, info->lower_state_info); - } - - return notifier_from_errno(err); - } - case NETDEV_CHANGE: - case NETDEV_UP: { - /* Track state of master port. - * DSA driver may require the master port (and indirectly - * the tagger) to be available for some special operation. - */ - if (netdev_uses_dsa(dev)) { - struct dsa_port *cpu_dp = dev->dsa_ptr; - struct dsa_switch_tree *dst = cpu_dp->ds->dst; - - /* Track when the master port is UP */ - dsa_tree_master_oper_state_change(dst, dev, - netif_oper_up(dev)); - - /* Track when the master port is ready and can accept - * packet. - * NETDEV_UP event is not enough to flag a port as ready. - * We also have to wait for linkwatch_do_dev to dev_activate - * and emit a NETDEV_CHANGE event. - * We check if a master port is ready by checking if the dev - * have a qdisc assigned and is not noop. 
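
This admin/oper tracking ultimately reaches the switch driver through
ds->ops->conduit_state_change (see the switch.c hunk further down). A
hypothetical consumer; struct foo_priv and the trap toggle are
assumptions, only the signature matches the notifier plumbing below:

static void foo_conduit_state_change(struct dsa_switch *ds,
                                     const struct net_device *conduit,
                                     bool operational)
{
        struct foo_priv *priv = ds->priv;

        /* e.g. stop trapping traffic to an unreachable host port */
        priv->host_traps_enabled = operational;
}
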
- */ - dsa_tree_master_admin_state_change(dst, dev, - !qdisc_tx_is_noop(dev)); - - return NOTIFY_OK; - } - - return NOTIFY_DONE; - } - case NETDEV_GOING_DOWN: { - struct dsa_port *dp, *cpu_dp; - struct dsa_switch_tree *dst; - LIST_HEAD(close_list); - - if (!netdev_uses_dsa(dev)) - return NOTIFY_DONE; - - cpu_dp = dev->dsa_ptr; - dst = cpu_dp->ds->dst; - - dsa_tree_master_admin_state_change(dst, dev, false); - - list_for_each_entry(dp, &dst->ports, list) { - if (!dsa_port_is_user(dp)) - continue; - - if (dp->cpu_dp != cpu_dp) - continue; - - list_add(&dp->slave->close_list, &close_list); - } - - dev_close_many(&close_list, true); - - return NOTIFY_OK; - } - default: - break; - } - - return NOTIFY_DONE; -} - -static void -dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work) -{ - struct switchdev_notifier_fdb_info info = {}; - - info.addr = switchdev_work->addr; - info.vid = switchdev_work->vid; - info.offloaded = true; - call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, - switchdev_work->orig_dev, &info.info, NULL); -} - -static void dsa_slave_switchdev_event_work(struct work_struct *work) -{ - struct dsa_switchdev_event_work *switchdev_work = - container_of(work, struct dsa_switchdev_event_work, work); - const unsigned char *addr = switchdev_work->addr; - struct net_device *dev = switchdev_work->dev; - u16 vid = switchdev_work->vid; - struct dsa_switch *ds; - struct dsa_port *dp; - int err; - - dp = dsa_slave_to_port(dev); - ds = dp->ds; - - switch (switchdev_work->event) { - case SWITCHDEV_FDB_ADD_TO_DEVICE: - if (switchdev_work->host_addr) - err = dsa_port_bridge_host_fdb_add(dp, addr, vid); - else if (dp->lag) - err = dsa_port_lag_fdb_add(dp, addr, vid); - else - err = dsa_port_fdb_add(dp, addr, vid); - if (err) { - dev_err(ds->dev, - "port %d failed to add %pM vid %d to fdb: %d\n", - dp->index, addr, vid, err); - break; - } - dsa_fdb_offload_notify(switchdev_work); - break; - - case SWITCHDEV_FDB_DEL_TO_DEVICE: - if (switchdev_work->host_addr) - err = dsa_port_bridge_host_fdb_del(dp, addr, vid); - else if (dp->lag) - err = dsa_port_lag_fdb_del(dp, addr, vid); - else - err = dsa_port_fdb_del(dp, addr, vid); - if (err) { - dev_err(ds->dev, - "port %d failed to delete %pM vid %d from fdb: %d\n", - dp->index, addr, vid, err); - } - - break; - } - - kfree(switchdev_work); -} - -static bool dsa_foreign_dev_check(const struct net_device *dev, - const struct net_device *foreign_dev) -{ - const struct dsa_port *dp = dsa_slave_to_port(dev); - struct dsa_switch_tree *dst = dp->ds->dst; - - if (netif_is_bridge_master(foreign_dev)) - return !dsa_tree_offloads_bridge_dev(dst, foreign_dev); - - if (netif_is_bridge_port(foreign_dev)) - return !dsa_tree_offloads_bridge_port(dst, foreign_dev); - - /* Everything else is foreign */ - return true; -} - -static int dsa_slave_fdb_event(struct net_device *dev, - struct net_device *orig_dev, - unsigned long event, const void *ctx, - const struct switchdev_notifier_fdb_info *fdb_info) -{ - struct dsa_switchdev_event_work *switchdev_work; - struct dsa_port *dp = dsa_slave_to_port(dev); - bool host_addr = fdb_info->is_local; - struct dsa_switch *ds = dp->ds; - - if (ctx && ctx != dp) - return 0; - - if (!dp->bridge) - return 0; - - if (switchdev_fdb_is_dynamically_learned(fdb_info)) { - if (dsa_port_offloads_bridge_port(dp, orig_dev)) - return 0; - - /* FDB entries learned by the software bridge or by foreign - * bridge ports should be installed as host addresses only if - * the driver requests assisted learning. 
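
Together with dsa_slave_switchdev_event_work() above, this dispatch
reduces to a small decision table. A standalone model; the enum names
are illustrative:

#include <stdbool.h>
#include <stdio.h>

enum fdb_op { FDB_HOST, FDB_LAG, FDB_PORT };

static enum fdb_op classify(bool host_addr, bool on_lag)
{
        if (host_addr)
                return FDB_HOST;        /* dsa_port_bridge_host_fdb_add/del */
        if (on_lag)
                return FDB_LAG;         /* dsa_port_lag_fdb_add/del */
        return FDB_PORT;                /* dsa_port_fdb_add/del */
}

int main(void)
{
        printf("%d %d %d\n", classify(true, true), classify(false, true),
               classify(false, false));
        return 0;
}
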
- */ - if (!ds->assisted_learning_on_cpu_port) - return 0; - } - - /* Also treat FDB entries on foreign interfaces bridged with us as host - * addresses. - */ - if (dsa_foreign_dev_check(dev, orig_dev)) - host_addr = true; - - /* Check early that we're not doing work in vain. - * Host addresses on LAG ports still require regular FDB ops, - * since the CPU port isn't in a LAG. - */ - if (dp->lag && !host_addr) { - if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del) - return -EOPNOTSUPP; - } else { - if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del) - return -EOPNOTSUPP; - } - - switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); - if (!switchdev_work) - return -ENOMEM; - - netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n", - event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting", - orig_dev->name, fdb_info->addr, fdb_info->vid, - host_addr ? " as host address" : ""); - - INIT_WORK(&switchdev_work->work, dsa_slave_switchdev_event_work); - switchdev_work->event = event; - switchdev_work->dev = dev; - switchdev_work->orig_dev = orig_dev; - - ether_addr_copy(switchdev_work->addr, fdb_info->addr); - switchdev_work->vid = fdb_info->vid; - switchdev_work->host_addr = host_addr; - - dsa_schedule_work(&switchdev_work->work); - - return 0; -} - -/* Called under rcu_read_lock() */ -static int dsa_slave_switchdev_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = switchdev_notifier_info_to_dev(ptr); - int err; - - switch (event) { - case SWITCHDEV_PORT_ATTR_SET: - err = switchdev_handle_port_attr_set(dev, ptr, - dsa_slave_dev_check, - dsa_slave_port_attr_set); - return notifier_from_errno(err); - case SWITCHDEV_FDB_ADD_TO_DEVICE: - case SWITCHDEV_FDB_DEL_TO_DEVICE: - err = switchdev_handle_fdb_event_to_device(dev, event, ptr, - dsa_slave_dev_check, - dsa_foreign_dev_check, - dsa_slave_fdb_event); - return notifier_from_errno(err); - default: - return NOTIFY_DONE; - } - - return NOTIFY_OK; -} - -static int dsa_slave_switchdev_blocking_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = switchdev_notifier_info_to_dev(ptr); - int err; - - switch (event) { - case SWITCHDEV_PORT_OBJ_ADD: - err = switchdev_handle_port_obj_add_foreign(dev, ptr, - dsa_slave_dev_check, - dsa_foreign_dev_check, - dsa_slave_port_obj_add); - return notifier_from_errno(err); - case SWITCHDEV_PORT_OBJ_DEL: - err = switchdev_handle_port_obj_del_foreign(dev, ptr, - dsa_slave_dev_check, - dsa_foreign_dev_check, - dsa_slave_port_obj_del); - return notifier_from_errno(err); - case SWITCHDEV_PORT_ATTR_SET: - err = switchdev_handle_port_attr_set(dev, ptr, - dsa_slave_dev_check, - dsa_slave_port_attr_set); - return notifier_from_errno(err); - } - - return NOTIFY_DONE; -} - -static struct notifier_block dsa_slave_nb __read_mostly = { - .notifier_call = dsa_slave_netdevice_event, -}; - -struct notifier_block dsa_slave_switchdev_notifier = { - .notifier_call = dsa_slave_switchdev_event, -}; - -struct notifier_block dsa_slave_switchdev_blocking_notifier = { - .notifier_call = dsa_slave_switchdev_blocking_event, -}; - -int dsa_slave_register_notifier(void) -{ - struct notifier_block *nb; - int err; - - err = register_netdevice_notifier(&dsa_slave_nb); - if (err) - return err; - - err = register_switchdev_notifier(&dsa_slave_switchdev_notifier); - if (err) - goto err_switchdev_nb; - - nb = &dsa_slave_switchdev_blocking_notifier; - err = register_switchdev_blocking_notifier(nb); - if (err) - goto 
err_switchdev_blocking_nb; - - return 0; - -err_switchdev_blocking_nb: - unregister_switchdev_notifier(&dsa_slave_switchdev_notifier); -err_switchdev_nb: - unregister_netdevice_notifier(&dsa_slave_nb); - return err; -} - -void dsa_slave_unregister_notifier(void) -{ - struct notifier_block *nb; - int err; - - nb = &dsa_slave_switchdev_blocking_notifier; - err = unregister_switchdev_blocking_notifier(nb); - if (err) - pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err); - - err = unregister_switchdev_notifier(&dsa_slave_switchdev_notifier); - if (err) - pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err); - - err = unregister_netdevice_notifier(&dsa_slave_nb); - if (err) - pr_err("DSA: failed to unregister slave notifier (%d)\n", err); -} diff --git a/net/dsa/slave.h b/net/dsa/slave.h deleted file mode 100644 index d0abe609e00d..000000000000 --- a/net/dsa/slave.h +++ /dev/null @@ -1,69 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#ifndef __DSA_SLAVE_H -#define __DSA_SLAVE_H - -#include -#include -#include -#include -#include -#include -#include - -struct net_device; -struct netlink_ext_ack; - -extern struct notifier_block dsa_slave_switchdev_notifier; -extern struct notifier_block dsa_slave_switchdev_blocking_notifier; - -struct dsa_slave_priv { - /* Copy of CPU port xmit for faster access in slave transmit hot path */ - struct sk_buff * (*xmit)(struct sk_buff *skb, - struct net_device *dev); - - struct gro_cells gcells; - - /* DSA port data, such as switch, port index, etc. */ - struct dsa_port *dp; - -#ifdef CONFIG_NET_POLL_CONTROLLER - struct netpoll *netpoll; -#endif - - /* TC context */ - struct list_head mall_tc_list; -}; - -void dsa_slave_mii_bus_init(struct dsa_switch *ds); -int dsa_slave_create(struct dsa_port *dp); -void dsa_slave_destroy(struct net_device *slave_dev); -int dsa_slave_suspend(struct net_device *slave_dev); -int dsa_slave_resume(struct net_device *slave_dev); -int dsa_slave_register_notifier(void); -void dsa_slave_unregister_notifier(void); -void dsa_slave_sync_ha(struct net_device *dev); -void dsa_slave_unsync_ha(struct net_device *dev); -void dsa_slave_setup_tagger(struct net_device *slave); -int dsa_slave_change_mtu(struct net_device *dev, int new_mtu); -int dsa_slave_change_master(struct net_device *dev, struct net_device *master, - struct netlink_ext_ack *extack); -int dsa_slave_manage_vlan_filtering(struct net_device *dev, - bool vlan_filtering); - -static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev) -{ - struct dsa_slave_priv *p = netdev_priv(dev); - - return p->dp; -} - -static inline struct net_device * -dsa_slave_to_master(const struct net_device *dev) -{ - struct dsa_port *dp = dsa_slave_to_port(dev); - - return dsa_port_to_master(dp); -} - -#endif diff --git a/net/dsa/switch.c b/net/dsa/switch.c index 1a42f9317334..3d2feeea897b 100644 --- a/net/dsa/switch.c +++ b/net/dsa/switch.c @@ -15,10 +15,10 @@ #include "dsa.h" #include "netlink.h" #include "port.h" -#include "slave.h" #include "switch.h" #include "tag_8021q.h" #include "trace.h" +#include "user.h" static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds, unsigned int ageing_time) @@ -894,12 +894,12 @@ static int dsa_switch_change_tag_proto(struct dsa_switch *ds, * bits that depend on the tagger, such as the MTU. 
*/ dsa_switch_for_each_user_port(dp, ds) { - struct net_device *slave = dp->slave; + struct net_device *user = dp->user; - dsa_slave_setup_tagger(slave); + dsa_user_setup_tagger(user); /* rtnl_mutex is held in dsa_tree_change_tag_proto */ - dsa_slave_change_mtu(slave, slave->mtu); + dsa_user_change_mtu(user, user->mtu); } return 0; @@ -960,13 +960,13 @@ dsa_switch_disconnect_tag_proto(struct dsa_switch *ds, } static int -dsa_switch_master_state_change(struct dsa_switch *ds, - struct dsa_notifier_master_state_info *info) +dsa_switch_conduit_state_change(struct dsa_switch *ds, + struct dsa_notifier_conduit_state_info *info) { - if (!ds->ops->master_state_change) + if (!ds->ops->conduit_state_change) return 0; - ds->ops->master_state_change(ds, info->master, info->operational); + ds->ops->conduit_state_change(ds, info->conduit, info->operational); return 0; } @@ -1056,8 +1056,8 @@ static int dsa_switch_event(struct notifier_block *nb, case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL: err = dsa_switch_tag_8021q_vlan_del(ds, info); break; - case DSA_NOTIFIER_MASTER_STATE_CHANGE: - err = dsa_switch_master_state_change(ds, info); + case DSA_NOTIFIER_CONDUIT_STATE_CHANGE: + err = dsa_switch_conduit_state_change(ds, info); break; default: err = -EOPNOTSUPP; diff --git a/net/dsa/switch.h b/net/dsa/switch.h index ea034677da15..be0a2749cd97 100644 --- a/net/dsa/switch.h +++ b/net/dsa/switch.h @@ -34,7 +34,7 @@ enum { DSA_NOTIFIER_TAG_PROTO_DISCONNECT, DSA_NOTIFIER_TAG_8021Q_VLAN_ADD, DSA_NOTIFIER_TAG_8021Q_VLAN_DEL, - DSA_NOTIFIER_MASTER_STATE_CHANGE, + DSA_NOTIFIER_CONDUIT_STATE_CHANGE, }; /* DSA_NOTIFIER_AGEING_TIME */ @@ -105,9 +105,9 @@ struct dsa_notifier_tag_8021q_vlan_info { u16 vid; }; -/* DSA_NOTIFIER_MASTER_STATE_CHANGE */ -struct dsa_notifier_master_state_info { - const struct net_device *master; +/* DSA_NOTIFIER_CONDUIT_STATE_CHANGE */ +struct dsa_notifier_conduit_state_info { + const struct net_device *conduit; bool operational; }; diff --git a/net/dsa/tag.c b/net/dsa/tag.c index 5105a5ff58fa..6e402d49afd3 100644 --- a/net/dsa/tag.c +++ b/net/dsa/tag.c @@ -13,8 +13,8 @@ #include #include -#include "slave.h" #include "tag.h" +#include "user.h" static LIST_HEAD(dsa_tag_drivers_list); static DEFINE_MUTEX(dsa_tag_drivers_lock); @@ -27,7 +27,7 @@ static DEFINE_MUTEX(dsa_tag_drivers_lock); * switch, the DSA driver owning the interface to which the packet is * delivered is never notified unless we do so here. 
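
The deferral below hinges on the driver's port_rxtstamp op returning
true once it takes ownership of the skb. A hypothetical
implementation, where foo_defer_rxtstamp() is an assumed driver helper
and the op signature is recalled from include/net/dsa.h rather than
shown in this patch:

static bool foo_port_rxtstamp(struct dsa_switch *ds, int port,
                              struct sk_buff *skb, unsigned int type)
{
        if (type != PTP_CLASS_V2_L2)    /* only defer L2 PTP frames */
                return false;

        /* complete the skb later, once the hw timestamp is read out */
        foo_defer_rxtstamp(ds->priv, port, skb);
        return true;
}
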
*/ -static bool dsa_skb_defer_rx_timestamp(struct dsa_slave_priv *p, +static bool dsa_skb_defer_rx_timestamp(struct dsa_user_priv *p, struct sk_buff *skb) { struct dsa_switch *ds = p->dp->ds; @@ -57,7 +57,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, struct metadata_dst *md_dst = skb_metadata_dst(skb); struct dsa_port *cpu_dp = dev->dsa_ptr; struct sk_buff *nskb = NULL; - struct dsa_slave_priv *p; + struct dsa_user_priv *p; if (unlikely(!cpu_dp)) { kfree_skb(skb); @@ -75,7 +75,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, if (!skb_has_extensions(skb)) skb->slow_gro = 0; - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (likely(skb->dev)) { dsa_default_offload_fwd_mark(skb); nskb = skb; @@ -94,7 +94,7 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, skb->dev); - if (unlikely(!dsa_slave_dev_check(skb->dev))) { + if (unlikely(!dsa_user_dev_check(skb->dev))) { /* Packet is to be injected directly on an upper * device, e.g. a team/bond, so skip all DSA-port * specific actions. diff --git a/net/dsa/tag.h b/net/dsa/tag.h index 32d12f4a9d73..f6b9c73718df 100644 --- a/net/dsa/tag.h +++ b/net/dsa/tag.h @@ -9,7 +9,7 @@ #include #include "port.h" -#include "slave.h" +#include "user.h" struct dsa_tag_driver { const struct dsa_device_ops *ops; @@ -29,7 +29,7 @@ static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops) return ops->needed_headroom + ops->needed_tailroom; } -static inline struct net_device *dsa_master_find_slave(struct net_device *dev, +static inline struct net_device *dsa_conduit_find_user(struct net_device *dev, int device, int port) { struct dsa_port *cpu_dp = dev->dsa_ptr; @@ -39,7 +39,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev, list_for_each_entry(dp, &dst->ports, list) if (dp->ds->index == device && dp->index == port && dp->type == DSA_PORT_TYPE_USER) - return dp->slave; + return dp->user; return NULL; } @@ -49,7 +49,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev, */ static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb) { - struct dsa_port *dp = dsa_slave_to_port(skb->dev); + struct dsa_port *dp = dsa_user_to_port(skb->dev); struct net_device *br = dsa_port_bridge_dev_get(dp); struct net_device *dev = skb->dev; struct net_device *upper_dev; @@ -107,12 +107,12 @@ static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb) * to support termination through the bridge. 
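
The per-tagger hunks below all follow the same two-sided contract,
which after this rename reads: stamp the dsa_user_to_port() index on
xmit, map it back with dsa_conduit_find_user() on rcv. A deliberately
minimal skeleton; the "foo_" names and the one-byte tag are
illustrative, not a real tag format:

static struct sk_buff *foo_tag_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        struct dsa_port *dp = dsa_user_to_port(dev);
        u8 *tag = skb_push(skb, 1);

        tag[0] = dp->index;
        return skb;
}

static struct sk_buff *foo_tag_rcv(struct sk_buff *skb,
                                   struct net_device *dev)
{
        u8 port = skb->data[0];

        skb_pull(skb, 1);
        skb->dev = dsa_conduit_find_user(dev, 0, port);
        if (!skb->dev)
                return NULL;    /* unknown source port, drop */

        return skb;
}
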
*/ static inline struct net_device * -dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid) +dsa_find_designated_bridge_port_by_vid(struct net_device *conduit, u16 vid) { - struct dsa_port *cpu_dp = master->dsa_ptr; + struct dsa_port *cpu_dp = conduit->dsa_ptr; struct dsa_switch_tree *dst = cpu_dp->dst; struct bridge_vlan_info vinfo; - struct net_device *slave; + struct net_device *user; struct dsa_port *dp; int err; @@ -134,13 +134,13 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid) if (dp->cpu_dp != cpu_dp) continue; - slave = dp->slave; + user = dp->user; - err = br_vlan_get_info_rcu(slave, vid, &vinfo); + err = br_vlan_get_info_rcu(user, vid, &vinfo); if (err) continue; - return slave; + return user; } return NULL; @@ -155,7 +155,7 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid) */ static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb) { - struct dsa_port *dp = dsa_slave_to_port(skb->dev); + struct dsa_port *dp = dsa_user_to_port(skb->dev); skb->offload_fwd_mark = !!(dp->bridge); } @@ -215,9 +215,9 @@ static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len) memmove(skb->data, skb->data + len, 2 * ETH_ALEN); } -/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from +/* On RX, eth_type_trans() on the DSA conduit pulls ETH_HLEN bytes starting from * skb_mac_header(skb), which leaves skb->data pointing at the first byte after - * what the DSA master perceives as the EtherType (the beginning of the L3 + * what the DSA conduit perceives as the EtherType (the beginning of the L3 * protocol). Since DSA EtherType header taggers treat the EtherType as part of * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header * is located 2 bytes behind skb->data. Note that EtherType in this context diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index cbdfc392f7e0..71b26ae6db39 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -73,7 +73,7 @@ struct dsa_tag_8021q_vlan { struct dsa_8021q_context { struct dsa_switch *ds; struct list_head vlans; - /* EtherType of RX VID, used for filtering on master interface */ + /* EtherType of RX VID, used for filtering on conduit interface */ __be16 proto; }; @@ -338,7 +338,7 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) struct dsa_8021q_context *ctx = ds->tag_8021q_ctx; struct dsa_port *dp = dsa_to_port(ds, port); u16 vid = dsa_tag_8021q_standalone_vid(dp); - struct net_device *master; + struct net_device *conduit; int err; /* The CPU port is implicitly configured by @@ -347,7 +347,7 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) if (!dsa_port_is_user(dp)) return 0; - master = dsa_port_to_master(dp); + conduit = dsa_port_to_conduit(dp); err = dsa_port_tag_8021q_vlan_add(dp, vid, false); if (err) { @@ -357,8 +357,8 @@ static int dsa_tag_8021q_port_setup(struct dsa_switch *ds, int port) return err; } - /* Add the VLAN to the master's RX filter. */ - vlan_vid_add(master, ctx->proto, vid); + /* Add the VLAN to the conduit's RX filter. 
*/ + vlan_vid_add(conduit, ctx->proto, vid); return err; } @@ -368,7 +368,7 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port) struct dsa_8021q_context *ctx = ds->tag_8021q_ctx; struct dsa_port *dp = dsa_to_port(ds, port); u16 vid = dsa_tag_8021q_standalone_vid(dp); - struct net_device *master; + struct net_device *conduit; /* The CPU port is implicitly configured by * configuring the front-panel ports @@ -376,11 +376,11 @@ static void dsa_tag_8021q_port_teardown(struct dsa_switch *ds, int port) if (!dsa_port_is_user(dp)) return; - master = dsa_port_to_master(dp); + conduit = dsa_port_to_conduit(dp); dsa_port_tag_8021q_vlan_del(dp, vid, false); - vlan_vid_del(master, ctx->proto, vid); + vlan_vid_del(conduit, ctx->proto, vid); } static int dsa_tag_8021q_setup(struct dsa_switch *ds) @@ -468,10 +468,10 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, } EXPORT_SYMBOL_GPL(dsa_8021q_xmit); -struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master, +struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, int vbid) { - struct dsa_port *cpu_dp = master->dsa_ptr; + struct dsa_port *cpu_dp = conduit->dsa_ptr; struct dsa_switch_tree *dst = cpu_dp->dst; struct dsa_port *dp; @@ -490,7 +490,7 @@ struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master, continue; if (dsa_port_bridge_num_get(dp) == vbid) - return dp->slave; + return dp->user; } return NULL; diff --git a/net/dsa/tag_8021q.h b/net/dsa/tag_8021q.h index b75cbaa028ef..41f7167ac520 100644 --- a/net/dsa/tag_8021q.h +++ b/net/dsa/tag_8021q.h @@ -16,7 +16,7 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, int *vbid); -struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *master, +struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, int vbid); int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds, diff --git a/net/dsa/tag_ar9331.c b/net/dsa/tag_ar9331.c index 7f3b7d730b85..92ce67b93a58 100644 --- a/net/dsa/tag_ar9331.c +++ b/net/dsa/tag_ar9331.c @@ -29,7 +29,7 @@ static struct sk_buff *ar9331_tag_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); __le16 *phdr; u16 hdr; @@ -74,7 +74,7 @@ static struct sk_buff *ar9331_tag_rcv(struct sk_buff *skb, /* Get source port information */ port = FIELD_GET(AR9331_HDR_PORT_NUM_MASK, hdr); - skb->dev = dsa_master_find_slave(ndev, 0, port); + skb->dev = dsa_conduit_find_user(ndev, 0, port); if (!skb->dev) return NULL; diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c index cacdafb41200..83d283a5d27e 100644 --- a/net/dsa/tag_brcm.c +++ b/net/dsa/tag_brcm.c @@ -85,7 +85,7 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb, struct net_device *dev, unsigned int offset) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); u16 queue = skb_get_queue_mapping(skb); u8 *brcm_tag; @@ -96,7 +96,7 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb, * (including FCS and tag) because the length verification is done after * the Broadcom tag is stripped off the ingress packet. 
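
In numbers: ETH_ZLEN is 60 and BRCM_TAG_LEN is 4, so the frame is
padded to 64 bytes on xmit and still meets the 60-byte minimum after
the switch strips the tag on ingress. free_on_error is false because
the caller owns the skb and frees it when the tagger returns NULL:

        if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false))
                return NULL;    /* caller frees the untouched skb */
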
* - * Let dsa_slave_xmit() free the SKB + * Let dsa_user_xmit() free the SKB */ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_TAG_LEN, false)) return NULL; @@ -119,7 +119,7 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb, brcm_tag[2] = BRCM_IG_DSTMAP2_MASK; brcm_tag[3] = (1 << dp->index) & BRCM_IG_DSTMAP1_MASK; - /* Now tell the master network device about the desired output queue + /* Now tell the conduit network device about the desired output queue * as well */ skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue)); @@ -164,7 +164,7 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb, /* Locate which port this is coming from */ source_port = brcm_tag[3] & BRCM_EG_PID_MASK; - skb->dev = dsa_master_find_slave(dev, 0, source_port); + skb->dev = dsa_conduit_find_user(dev, 0, source_port); if (!skb->dev) return NULL; @@ -216,7 +216,7 @@ MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM, BRCM_NAME); static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); u8 *brcm_tag; /* The Ethernet switch we are interfaced with needs packets to be at @@ -226,7 +226,7 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb, * (including FCS and tag) because the length verification is done after * the Broadcom tag is stripped off the ingress packet. * - * Let dsa_slave_xmit() free the SKB + * Let dsa_user_xmit() free the SKB */ if (__skb_put_padto(skb, ETH_ZLEN + BRCM_LEG_TAG_LEN, false)) return NULL; @@ -264,7 +264,7 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb, source_port = brcm_tag[5] & BRCM_LEG_PORT_ID; - skb->dev = dsa_master_find_slave(dev, 0, source_port); + skb->dev = dsa_conduit_find_user(dev, 0, source_port); if (!skb->dev) return NULL; diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index 1fd7fa26db64..8ed52dd663ab 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -129,7 +129,7 @@ enum dsa_code { static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev, u8 extra) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); struct net_device *br_dev; u8 tag_dev, tag_port; enum dsa_cmd cmd; @@ -267,14 +267,14 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev, lag = dsa_lag_by_id(cpu_dp->dst, source_port + 1); skb->dev = lag ? lag->dev : NULL; } else { - skb->dev = dsa_master_find_slave(dev, source_device, + skb->dev = dsa_conduit_find_user(dev, source_device, source_port); } if (!skb->dev) return NULL; - /* When using LAG offload, skb->dev is not a DSA slave interface, + /* When using LAG offload, skb->dev is not a DSA user interface, * so we cannot call dsa_default_offload_fwd_mark and we need to * special-case it. 
*/ diff --git a/net/dsa/tag_gswip.c b/net/dsa/tag_gswip.c index e279cd9057b0..3539141b5350 100644 --- a/net/dsa/tag_gswip.c +++ b/net/dsa/tag_gswip.c @@ -61,7 +61,7 @@ static struct sk_buff *gswip_tag_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); u8 *gswip_tag; skb_push(skb, GSWIP_TX_HEADER_LEN); @@ -89,7 +89,7 @@ static struct sk_buff *gswip_tag_rcv(struct sk_buff *skb, /* Get source port information */ port = (gswip_tag[7] & GSWIP_RX_SPPID_MASK) >> GSWIP_RX_SPPID_SHIFT; - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (!skb->dev) return NULL; diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c index 03a1fb9c87a9..6e233cd0aa38 100644 --- a/net/dsa/tag_hellcreek.c +++ b/net/dsa/tag_hellcreek.c @@ -20,7 +20,7 @@ static struct sk_buff *hellcreek_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); u8 *tag; /* Calculate checksums (if required) before adding the trailer tag to @@ -45,7 +45,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb, u8 *tag = skb_tail_pointer(skb) - HELLCREEK_TAG_LEN; unsigned int port = tag[0] & 0x03; - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (!skb->dev) { netdev_warn_once(dev, "Failed to get source port: %d\n", port); return NULL; diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 3632e47dea9e..9be341fa88f0 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -87,7 +87,7 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb, struct net_device *dev, unsigned int port, unsigned int len) { - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (!skb->dev) return NULL; @@ -119,7 +119,7 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb, static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); struct ethhdr *hdr; u8 *tag; @@ -256,7 +256,7 @@ static struct sk_buff *ksz_defer_xmit(struct dsa_port *dp, struct sk_buff *skb) return NULL; kthread_init_work(&xmit_work->work, xmit_work_fn); - /* Increase refcount so the kfree_skb in dsa_slave_xmit + /* Increase refcount so the kfree_skb in dsa_user_xmit * won't really free the packet. 
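
Reduced to its core, that deferral is along these lines; the field
names and the worker handle are illustrative, the real code being
ksz_defer_xmit() in this hunk:

        xmit_work->dp = dp;
        xmit_work->skb = skb_get(skb); /* extra reference for the worker */
        kthread_queue_work(worker, &xmit_work->work);

        /* dsa_user_xmit() sees NULL, calls kfree_skb() and only drops
         * its own reference; the worker's reference keeps the skb alive.
         */
        return NULL;
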
*/ xmit_work->dp = dp; @@ -272,7 +272,7 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, { u16 queue_mapping = skb_get_queue_mapping(skb); u8 prio = netdev_txq_to_tc(dev, queue_mapping); - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); struct ethhdr *hdr; __be16 *tag; u16 val; @@ -344,7 +344,7 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb, { u16 queue_mapping = skb_get_queue_mapping(skb); u8 prio = netdev_txq_to_tc(dev, queue_mapping); - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); struct ethhdr *hdr; u8 *tag; @@ -410,7 +410,7 @@ static struct sk_buff *lan937x_xmit(struct sk_buff *skb, { u16 queue_mapping = skb_get_queue_mapping(skb); u8 prio = netdev_txq_to_tc(dev, queue_mapping); - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); const struct ethhdr *hdr = eth_hdr(skb); __be16 *tag; u16 val; diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c index c25f5536706b..1ed8ee24855d 100644 --- a/net/dsa/tag_lan9303.c +++ b/net/dsa/tag_lan9303.c @@ -56,7 +56,7 @@ static int lan9303_xmit_use_arl(struct dsa_port *dp, u8 *dest_addr) static struct sk_buff *lan9303_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); __be16 *lan9303_tag; u16 tag; @@ -99,7 +99,7 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev) source_port = lan9303_tag1 & 0x3; - skb->dev = dsa_master_find_slave(dev, 0, source_port); + skb->dev = dsa_conduit_find_user(dev, 0, source_port); if (!skb->dev) { dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid source port\n"); return NULL; diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c index 40af80452747..2483785f6ab1 100644 --- a/net/dsa/tag_mtk.c +++ b/net/dsa/tag_mtk.c @@ -23,7 +23,7 @@ static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); u8 xmit_tpid; u8 *mtk_tag; @@ -85,7 +85,7 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev) /* Get source port information */ port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK); - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (!skb->dev) return NULL; diff --git a/net/dsa/tag_none.c b/net/dsa/tag_none.c index d2fd179c4227..9a473624db50 100644 --- a/net/dsa/tag_none.c +++ b/net/dsa/tag_none.c @@ -12,8 +12,8 @@ #define NONE_NAME "none" -static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb, - struct net_device *dev) +static struct sk_buff *dsa_user_notag_xmit(struct sk_buff *skb, + struct net_device *dev) { /* Just return the original SKB */ return skb; @@ -22,7 +22,7 @@ static struct sk_buff *dsa_slave_notag_xmit(struct sk_buff *skb, static const struct dsa_device_ops none_ops = { .name = NONE_NAME, .proto = DSA_TAG_PROTO_NONE, - .xmit = dsa_slave_notag_xmit, + .xmit = dsa_user_notag_xmit, }; module_dsa_tag_driver(none_ops); diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index 20bf7074d5a6..ef2f8fffb2c7 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -45,7 +45,7 @@ static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp, static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev, __be32 ifh_prefix, void **ifh) { - struct dsa_port *dp = dsa_slave_to_port(netdev); + 
struct dsa_port *dp = dsa_user_to_port(netdev); struct dsa_switch *ds = dp->ds; u64 vlan_tci, tag_type; void *injection; @@ -79,7 +79,7 @@ static void ocelot_xmit_common(struct sk_buff *skb, struct net_device *netdev, static struct sk_buff *ocelot_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct dsa_port *dp = dsa_slave_to_port(netdev); + struct dsa_port *dp = dsa_user_to_port(netdev); void *injection; ocelot_xmit_common(skb, netdev, cpu_to_be32(0x8880000a), &injection); @@ -91,7 +91,7 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, static struct sk_buff *seville_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct dsa_port *dp = dsa_slave_to_port(netdev); + struct dsa_port *dp = dsa_user_to_port(netdev); void *injection; ocelot_xmit_common(skb, netdev, cpu_to_be32(0x88800005), &injection); @@ -111,12 +111,12 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, u16 vlan_tpid; u64 rew_val; - /* Revert skb->data by the amount consumed by the DSA master, + /* Revert skb->data by the amount consumed by the DSA conduit, * so it points to the beginning of the frame. */ skb_push(skb, ETH_HLEN); /* We don't care about the short prefix, it is just for easy entrance - * into the DSA master's RX filter. Discard it now by moving it into + * into the DSA conduit's RX filter. Discard it now by moving it into * the headroom. */ skb_pull(skb, OCELOT_SHORT_PREFIX_LEN); @@ -141,12 +141,12 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, ocelot_xfh_get_vlan_tci(extraction, &vlan_tci); ocelot_xfh_get_rew_val(extraction, &rew_val); - skb->dev = dsa_master_find_slave(netdev, 0, src_port); + skb->dev = dsa_conduit_find_user(netdev, 0, src_port); if (!skb->dev) /* The switch will reflect back some frames sent through - * sockets opened on the bare DSA master. These will come back + * sockets opened on the bare DSA conduit. These will come back * with src_port equal to the index of the CPU port, for which - * there is no slave registered. So don't print any error + * there is no user registered. So don't print any error * message here (ignore and drop those frames). */ return NULL; @@ -170,7 +170,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, * equal to the pvid of the ingress port and should not be used for * processing. */ - dp = dsa_slave_to_port(skb->dev); + dp = dsa_user_to_port(skb->dev); vlan_tpid = tag_type ? ETH_P_8021AD : ETH_P_8021Q; if (dsa_port_is_vlan_filtering(dp) && @@ -192,7 +192,7 @@ static const struct dsa_device_ops ocelot_netdev_ops = { .xmit = ocelot_xmit, .rcv = ocelot_rcv, .needed_headroom = OCELOT_TOTAL_TAG_LEN, - .promisc_on_master = true, + .promisc_on_conduit = true, }; DSA_TAG_DRIVER(ocelot_netdev_ops); @@ -204,7 +204,7 @@ static const struct dsa_device_ops seville_netdev_ops = { .xmit = seville_xmit, .rcv = ocelot_rcv, .needed_headroom = OCELOT_TOTAL_TAG_LEN, - .promisc_on_master = true, + .promisc_on_conduit = true, }; DSA_TAG_DRIVER(seville_netdev_ops); diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c index 1f0b8c20eba5..210039320888 100644 --- a/net/dsa/tag_ocelot_8021q.c +++ b/net/dsa/tag_ocelot_8021q.c @@ -37,8 +37,8 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp, return NULL; /* PTP over IP packets need UDP checksumming. We may have inherited - * NETIF_F_HW_CSUM from the DSA master, but these packets are not sent - * through the DSA master, so calculate the checksum here. 
+ * NETIF_F_HW_CSUM from the DSA conduit, but these packets are not sent + * through the DSA conduit, so calculate the checksum here. */ if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) return NULL; @@ -49,7 +49,7 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp, /* Calls felix_port_deferred_xmit in felix.c */ kthread_init_work(&xmit_work->work, xmit_work_fn); - /* Increase refcount so the kfree_skb in dsa_slave_xmit + /* Increase refcount so the kfree_skb in dsa_user_xmit * won't really free the packet. */ xmit_work->dp = dp; @@ -63,7 +63,7 @@ static struct sk_buff *ocelot_defer_xmit(struct dsa_port *dp, static struct sk_buff *ocelot_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct dsa_port *dp = dsa_slave_to_port(netdev); + struct dsa_port *dp = dsa_user_to_port(netdev); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); u16 tx_vid = dsa_tag_8021q_standalone_vid(dp); @@ -83,7 +83,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, dsa_8021q_rcv(skb, &src_port, &switch_id, NULL); - skb->dev = dsa_master_find_slave(netdev, switch_id, src_port); + skb->dev = dsa_conduit_find_user(netdev, switch_id, src_port); if (!skb->dev) return NULL; @@ -130,7 +130,7 @@ static const struct dsa_device_ops ocelot_8021q_netdev_ops = { .connect = ocelot_connect, .disconnect = ocelot_disconnect, .needed_headroom = VLAN_HLEN, - .promisc_on_master = true, + .promisc_on_conduit = true, }; MODULE_LICENSE("GPL v2"); diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c index e5ff7c34e577..6514aa7993ce 100644 --- a/net/dsa/tag_qca.c +++ b/net/dsa/tag_qca.c @@ -14,7 +14,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); __be16 *phdr; u16 hdr; @@ -78,7 +78,7 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev) /* Get source port information */ port = FIELD_GET(QCA_HDR_RECV_SOURCE_PORT, hdr); - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (!skb->dev) return NULL; @@ -116,7 +116,7 @@ static const struct dsa_device_ops qca_netdev_ops = { .xmit = qca_tag_xmit, .rcv = qca_tag_rcv, .needed_headroom = QCA_HDR_LEN, - .promisc_on_master = true, + .promisc_on_conduit = true, }; MODULE_LICENSE("GPL"); diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c index c327314b95e3..4da5bad1a7aa 100644 --- a/net/dsa/tag_rtl4_a.c +++ b/net/dsa/tag_rtl4_a.c @@ -36,7 +36,7 @@ static struct sk_buff *rtl4a_tag_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); __be16 *p; u8 *tag; u16 out; @@ -97,9 +97,9 @@ static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb, } port = protport & 0xff; - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (!skb->dev) { - netdev_dbg(dev, "could not find slave for port %d\n", port); + netdev_dbg(dev, "could not find user for port %d\n", port); return NULL; } diff --git a/net/dsa/tag_rtl8_4.c b/net/dsa/tag_rtl8_4.c index 4f67834fd121..07e857debabf 100644 --- a/net/dsa/tag_rtl8_4.c +++ b/net/dsa/tag_rtl8_4.c @@ -103,7 +103,7 @@ static void rtl8_4_write_tag(struct sk_buff *skb, struct net_device *dev, void *tag) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); __be16 tag16[RTL8_4_TAG_LEN / 2]; /* Set 
Realtek EtherType */ @@ -180,10 +180,10 @@ static int rtl8_4_read_tag(struct sk_buff *skb, struct net_device *dev, /* Parse TX (switch->CPU) */ port = FIELD_GET(RTL8_4_TX, ntohs(tag16[3])); - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (!skb->dev) { dev_warn_ratelimited(&dev->dev, - "could not find slave for port %d\n", + "could not find user for port %d\n", port); return -ENOENT; } diff --git a/net/dsa/tag_rzn1_a5psw.c b/net/dsa/tag_rzn1_a5psw.c index 437a6820ac42..2ce866b45615 100644 --- a/net/dsa/tag_rzn1_a5psw.c +++ b/net/dsa/tag_rzn1_a5psw.c @@ -39,7 +39,7 @@ struct a5psw_tag { static struct sk_buff *a5psw_tag_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); struct a5psw_tag *ptag; u32 data2_val; @@ -90,7 +90,7 @@ static struct sk_buff *a5psw_tag_rcv(struct sk_buff *skb, port = FIELD_GET(A5PSW_CTRL_DATA_PORT, ntohs(tag->ctrl_data)); - skb->dev = dsa_master_find_slave(dev, 0, port); + skb->dev = dsa_conduit_find_user(dev, 0, port); if (!skb->dev) return NULL; diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index ade3eeb2f3e6..1fffe8c2b589 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -157,7 +157,7 @@ static struct sk_buff *sja1105_defer_xmit(struct dsa_port *dp, return NULL; kthread_init_work(&xmit_work->work, xmit_work_fn); - /* Increase refcount so the kfree_skb in dsa_slave_xmit + /* Increase refcount so the kfree_skb in dsa_user_xmit * won't really free the packet. */ xmit_work->dp = dp; @@ -210,7 +210,7 @@ static u16 sja1105_xmit_tpid(struct dsa_port *dp) static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct dsa_port *dp = dsa_slave_to_port(netdev); + struct dsa_port *dp = dsa_user_to_port(netdev); unsigned int bridge_num = dsa_port_bridge_num_get(dp); struct net_device *br = dsa_port_bridge_dev_get(dp); u16 tx_vid; @@ -235,7 +235,7 @@ static struct sk_buff *sja1105_imprecise_xmit(struct sk_buff *skb, /* Transform untagged control packets into pvid-tagged control packets so that * all packets sent by this tagger are VLAN-tagged and we can configure the - * switch to drop untagged packets coming from the DSA master. + * switch to drop untagged packets coming from the DSA conduit. */ static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp, struct sk_buff *skb, u8 pcp) @@ -266,7 +266,7 @@ static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp, static struct sk_buff *sja1105_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct dsa_port *dp = dsa_slave_to_port(netdev); + struct dsa_port *dp = dsa_user_to_port(netdev); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); u16 tx_vid = dsa_tag_8021q_standalone_vid(dp); @@ -294,7 +294,7 @@ static struct sk_buff *sja1110_xmit(struct sk_buff *skb, struct net_device *netdev) { struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone; - struct dsa_port *dp = dsa_slave_to_port(netdev); + struct dsa_port *dp = dsa_user_to_port(netdev); u16 queue_mapping = skb_get_queue_mapping(skb); u8 pcp = netdev_txq_to_tc(netdev, queue_mapping); u16 tx_vid = dsa_tag_8021q_standalone_vid(dp); @@ -383,7 +383,7 @@ static struct sk_buff * Buffer it until we get its meta frame. 
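/*
 * Illustrative sketch (not part of the patch) of what
 * sja1105_pvid_tag_control_pkt() above achieves: once the switch is
 * configured to drop untagged frames coming from the DSA conduit,
 * every frame this tagger emits must carry some VLAN tag, so untagged
 * control packets get the port's pvid pushed onto them. The structure
 * and helper names are ours; only the 802.1Q TCI bit layout is real.
 */
#include <stdbool.h>
#include <stdint.h>

struct ctrl_pkt {
	bool has_vlan_tag;
	uint16_t tci;		/* PCP(3) | DEI(1) | VID(12) if tagged */
};

static void pvid_tag_control_pkt_model(struct ctrl_pkt *pkt,
				       uint16_t pvid, uint8_t pcp)
{
	if (pkt->has_vlan_tag)
		return;		/* already acceptable to the switch */

	pkt->tci = (uint16_t)(pcp << 13) | (pvid & 0xfff);
	pkt->has_vlan_tag = true;
}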
*/ if (is_link_local) { - struct dsa_port *dp = dsa_slave_to_port(skb->dev); + struct dsa_port *dp = dsa_user_to_port(skb->dev); struct sja1105_tagger_private *priv; struct dsa_switch *ds = dp->ds; @@ -396,7 +396,7 @@ static struct sk_buff if (priv->stampable_skb) { dev_err_ratelimited(ds->dev, "Expected meta frame, is %12llx " - "in the DSA master multicast filter?\n", + "in the DSA conduit multicast filter?\n", SJA1105_META_DMAC); kfree_skb(priv->stampable_skb); } @@ -417,7 +417,7 @@ static struct sk_buff * frame, which serves no further purpose). */ } else if (is_meta) { - struct dsa_port *dp = dsa_slave_to_port(skb->dev); + struct dsa_port *dp = dsa_user_to_port(skb->dev); struct sja1105_tagger_private *priv; struct dsa_switch *ds = dp->ds; struct sk_buff *stampable_skb; @@ -550,7 +550,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, } if (source_port != -1 && switch_id != -1) - skb->dev = dsa_master_find_slave(netdev, switch_id, source_port); + skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port); else if (vbid >= 1) skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid); else @@ -573,16 +573,16 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header) int switch_id = SJA1110_RX_HEADER_SWITCH_ID(rx_header); int n_ts = SJA1110_RX_HEADER_N_TS(rx_header); struct sja1105_tagger_data *tagger_data; - struct net_device *master = skb->dev; + struct net_device *conduit = skb->dev; struct dsa_port *cpu_dp; struct dsa_switch *ds; int i; - cpu_dp = master->dsa_ptr; + cpu_dp = conduit->dsa_ptr; ds = dsa_switch_find(cpu_dp->dst->index, switch_id); if (!ds) { net_err_ratelimited("%s: cannot find switch id %d\n", - master->name, switch_id); + conduit->name, switch_id); return NULL; } @@ -649,7 +649,7 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb, /* skb->len counts from skb->data, while start_of_padding * counts from the destination MAC address. Right now skb->data - * is still as set by the DSA master, so to trim away the + * is still as set by the DSA conduit, so to trim away the * padding and trailer we need to account for the fact that * skb->data points to skb_mac_header(skb) + ETH_HLEN. 
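/*
 * Illustrative sketch (not part of the patch) of the RX resolution
 * order visible in sja1105_rcv() above: prefer precise (switch_id,
 * source_port) metadata, fall back to the virtual bridge ID (vbid),
 * and only then guess via the designated bridge port for the VID.
 * The resolver stubs are ours.
 */
struct net_dev;		/* opaque stand-in for struct net_device */

struct net_dev *find_user_port(int switch_id, int port);	/* precise */
struct net_dev *find_port_by_vbid(int vbid);			/* imprecise */
struct net_dev *find_designated_port_by_vid(int vid);		/* last resort */

static struct net_dev *resolve_rx_dev(int switch_id, int source_port,
				      int vbid, int vid)
{
	if (source_port != -1 && switch_id != -1)
		return find_user_port(switch_id, source_port);
	if (vbid >= 1)
		return find_port_by_vbid(vbid);
	return find_designated_port_by_vid(vid);
}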
*/ @@ -698,7 +698,7 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb, else if (source_port == -1 || switch_id == -1) skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid); else - skb->dev = dsa_master_find_slave(netdev, switch_id, source_port); + skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port); if (!skb->dev) { netdev_warn(netdev, "Couldn't decode source port\n"); return NULL; @@ -778,7 +778,7 @@ static const struct dsa_device_ops sja1105_netdev_ops = { .disconnect = sja1105_disconnect, .needed_headroom = VLAN_HLEN, .flow_dissect = sja1105_flow_dissect, - .promisc_on_master = true, + .promisc_on_conduit = true, }; DSA_TAG_DRIVER(sja1105_netdev_ops); diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 7361b9106382..1ebb25a8b140 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -14,7 +14,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *dp = dsa_slave_to_port(dev); + struct dsa_port *dp = dsa_user_to_port(dev); u8 *trailer; trailer = skb_put(skb, 4); @@ -41,7 +41,7 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev) source_port = trailer[1] & 7; - skb->dev = dsa_master_find_slave(dev, 0, source_port); + skb->dev = dsa_conduit_find_user(dev, 0, source_port); if (!skb->dev) return NULL; diff --git a/net/dsa/tag_xrs700x.c b/net/dsa/tag_xrs700x.c index af19969f9bc4..c9c163598ef2 100644 --- a/net/dsa/tag_xrs700x.c +++ b/net/dsa/tag_xrs700x.c @@ -13,7 +13,7 @@ static struct sk_buff *xrs700x_xmit(struct sk_buff *skb, struct net_device *dev) { - struct dsa_port *partner, *dp = dsa_slave_to_port(dev); + struct dsa_port *partner, *dp = dsa_user_to_port(dev); u8 *trailer; trailer = skb_put(skb, 1); @@ -39,7 +39,7 @@ static struct sk_buff *xrs700x_rcv(struct sk_buff *skb, struct net_device *dev) if (source_port < 0) return NULL; - skb->dev = dsa_master_find_slave(dev, 0, source_port); + skb->dev = dsa_conduit_find_user(dev, 0, source_port); if (!skb->dev) return NULL; diff --git a/net/dsa/user.c b/net/dsa/user.c new file mode 100644 index 000000000000..d438884a4eb0 --- /dev/null +++ b/net/dsa/user.c @@ -0,0 +1,3727 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * net/dsa/user.c - user device handling + * Copyright (c) 2008-2009 Marvell Semiconductor + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "conduit.h" +#include "dsa.h" +#include "netlink.h" +#include "port.h" +#include "switch.h" +#include "tag.h" +#include "user.h" + +struct dsa_switchdev_event_work { + struct net_device *dev; + struct net_device *orig_dev; + struct work_struct work; + unsigned long event; + /* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and + * SWITCHDEV_FDB_DEL_TO_DEVICE + */ + unsigned char addr[ETH_ALEN]; + u16 vid; + bool host_addr; +}; + +enum dsa_standalone_event { + DSA_UC_ADD, + DSA_UC_DEL, + DSA_MC_ADD, + DSA_MC_DEL, +}; + +struct dsa_standalone_event_work { + struct work_struct work; + struct net_device *dev; + enum dsa_standalone_event event; + unsigned char addr[ETH_ALEN]; + u16 vid; +}; + +struct dsa_host_vlan_rx_filtering_ctx { + struct net_device *dev; + const unsigned char *addr; + enum dsa_standalone_event event; +}; + +static bool dsa_switch_supports_uc_filtering(struct dsa_switch *ds) +{ + return ds->ops->port_fdb_add && ds->ops->port_fdb_del && + ds->fdb_isolation && !ds->vlan_filtering_is_global && + 
!ds->needs_standalone_vlan_filtering; +} + +static bool dsa_switch_supports_mc_filtering(struct dsa_switch *ds) +{ + return ds->ops->port_mdb_add && ds->ops->port_mdb_del && + ds->fdb_isolation && !ds->vlan_filtering_is_global && + !ds->needs_standalone_vlan_filtering; +} + +static void dsa_user_standalone_event_work(struct work_struct *work) +{ + struct dsa_standalone_event_work *standalone_work = + container_of(work, struct dsa_standalone_event_work, work); + const unsigned char *addr = standalone_work->addr; + struct net_device *dev = standalone_work->dev; + struct dsa_port *dp = dsa_user_to_port(dev); + struct switchdev_obj_port_mdb mdb; + struct dsa_switch *ds = dp->ds; + u16 vid = standalone_work->vid; + int err; + + switch (standalone_work->event) { + case DSA_UC_ADD: + err = dsa_port_standalone_host_fdb_add(dp, addr, vid); + if (err) { + dev_err(ds->dev, + "port %d failed to add %pM vid %d to fdb: %d\n", + dp->index, addr, vid, err); + break; + } + break; + + case DSA_UC_DEL: + err = dsa_port_standalone_host_fdb_del(dp, addr, vid); + if (err) { + dev_err(ds->dev, + "port %d failed to delete %pM vid %d from fdb: %d\n", + dp->index, addr, vid, err); + } + + break; + case DSA_MC_ADD: + ether_addr_copy(mdb.addr, addr); + mdb.vid = vid; + + err = dsa_port_standalone_host_mdb_add(dp, &mdb); + if (err) { + dev_err(ds->dev, + "port %d failed to add %pM vid %d to mdb: %d\n", + dp->index, addr, vid, err); + break; + } + break; + case DSA_MC_DEL: + ether_addr_copy(mdb.addr, addr); + mdb.vid = vid; + + err = dsa_port_standalone_host_mdb_del(dp, &mdb); + if (err) { + dev_err(ds->dev, + "port %d failed to delete %pM vid %d from mdb: %d\n", + dp->index, addr, vid, err); + } + + break; + } + + kfree(standalone_work); +} + +static int dsa_user_schedule_standalone_work(struct net_device *dev, + enum dsa_standalone_event event, + const unsigned char *addr, + u16 vid) +{ + struct dsa_standalone_event_work *standalone_work; + + standalone_work = kzalloc(sizeof(*standalone_work), GFP_ATOMIC); + if (!standalone_work) + return -ENOMEM; + + INIT_WORK(&standalone_work->work, dsa_user_standalone_event_work); + standalone_work->event = event; + standalone_work->dev = dev; + + ether_addr_copy(standalone_work->addr, addr); + standalone_work->vid = vid; + + dsa_schedule_work(&standalone_work->work); + + return 0; +} + +static int dsa_user_host_vlan_rx_filtering(void *arg, int vid) +{ + struct dsa_host_vlan_rx_filtering_ctx *ctx = arg; + + return dsa_user_schedule_standalone_work(ctx->dev, ctx->event, + ctx->addr, vid); +} + +static int dsa_user_vlan_for_each(struct net_device *dev, + int (*cb)(void *arg, int vid), void *arg) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_vlan *v; + int err; + + lockdep_assert_held(&dev->addr_list_lock); + + err = cb(arg, 0); + if (err) + return err; + + list_for_each_entry(v, &dp->user_vlans, list) { + err = cb(arg, v->vid); + if (err) + return err; + } + + return 0; +} + +static int dsa_user_sync_uc(struct net_device *dev, + const unsigned char *addr) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_host_vlan_rx_filtering_ctx ctx = { + .dev = dev, + .addr = addr, + .event = DSA_UC_ADD, + }; + + dev_uc_add(conduit, addr); + + if (!dsa_switch_supports_uc_filtering(dp->ds)) + return 0; + + return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering, + &ctx); +} + +static int dsa_user_unsync_uc(struct net_device *dev, + const unsigned char *addr) +{ + struct net_device *conduit = 
dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_host_vlan_rx_filtering_ctx ctx = { + .dev = dev, + .addr = addr, + .event = DSA_UC_DEL, + }; + + dev_uc_del(conduit, addr); + + if (!dsa_switch_supports_uc_filtering(dp->ds)) + return 0; + + return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering, + &ctx); +} + +static int dsa_user_sync_mc(struct net_device *dev, + const unsigned char *addr) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_host_vlan_rx_filtering_ctx ctx = { + .dev = dev, + .addr = addr, + .event = DSA_MC_ADD, + }; + + dev_mc_add(conduit, addr); + + if (!dsa_switch_supports_mc_filtering(dp->ds)) + return 0; + + return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering, + &ctx); +} + +static int dsa_user_unsync_mc(struct net_device *dev, + const unsigned char *addr) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_host_vlan_rx_filtering_ctx ctx = { + .dev = dev, + .addr = addr, + .event = DSA_MC_DEL, + }; + + dev_mc_del(conduit, addr); + + if (!dsa_switch_supports_mc_filtering(dp->ds)) + return 0; + + return dsa_user_vlan_for_each(dev, dsa_user_host_vlan_rx_filtering, + &ctx); +} + +void dsa_user_sync_ha(struct net_device *dev) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(dev); + + netdev_for_each_synced_mc_addr(ha, dev) + dsa_user_sync_mc(dev, ha->addr); + + netdev_for_each_synced_uc_addr(ha, dev) + dsa_user_sync_uc(dev, ha->addr); + + netif_addr_unlock_bh(dev); + + if (dsa_switch_supports_uc_filtering(ds) || + dsa_switch_supports_mc_filtering(ds)) + dsa_flush_workqueue(); +} + +void dsa_user_unsync_ha(struct net_device *dev) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + struct netdev_hw_addr *ha; + + netif_addr_lock_bh(dev); + + netdev_for_each_synced_uc_addr(ha, dev) + dsa_user_unsync_uc(dev, ha->addr); + + netdev_for_each_synced_mc_addr(ha, dev) + dsa_user_unsync_mc(dev, ha->addr); + + netif_addr_unlock_bh(dev); + + if (dsa_switch_supports_uc_filtering(ds) || + dsa_switch_supports_mc_filtering(ds)) + dsa_flush_workqueue(); +} + +/* user mii_bus handling ***************************************************/ +static int dsa_user_phy_read(struct mii_bus *bus, int addr, int reg) +{ + struct dsa_switch *ds = bus->priv; + + if (ds->phys_mii_mask & (1 << addr)) + return ds->ops->phy_read(ds, addr, reg); + + return 0xffff; +} + +static int dsa_user_phy_write(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct dsa_switch *ds = bus->priv; + + if (ds->phys_mii_mask & (1 << addr)) + return ds->ops->phy_write(ds, addr, reg, val); + + return 0; +} + +void dsa_user_mii_bus_init(struct dsa_switch *ds) +{ + ds->user_mii_bus->priv = (void *)ds; + ds->user_mii_bus->name = "dsa user smi"; + ds->user_mii_bus->read = dsa_user_phy_read; + ds->user_mii_bus->write = dsa_user_phy_write; + snprintf(ds->user_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d.%d", + ds->dst->index, ds->index); + ds->user_mii_bus->parent = ds->dev; + ds->user_mii_bus->phy_mask = ~ds->phys_mii_mask; +} + + +/* user device handling ****************************************************/ +static int dsa_user_get_iflink(const struct net_device *dev) +{ + return dsa_user_to_conduit(dev)->ifindex; +} + +static int dsa_user_open(struct net_device *dev) +{ + struct net_device *conduit = 
dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + int err; + + err = dev_open(conduit, NULL); + if (err < 0) { + netdev_err(dev, "failed to open conduit %s\n", conduit->name); + goto out; + } + + if (dsa_switch_supports_uc_filtering(ds)) { + err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0); + if (err) + goto out; + } + + if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) { + err = dev_uc_add(conduit, dev->dev_addr); + if (err < 0) + goto del_host_addr; + } + + err = dsa_port_enable_rt(dp, dev->phydev); + if (err) + goto del_unicast; + + return 0; + +del_unicast: + if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) + dev_uc_del(conduit, dev->dev_addr); +del_host_addr: + if (dsa_switch_supports_uc_filtering(ds)) + dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); +out: + return err; +} + +static int dsa_user_close(struct net_device *dev) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + dsa_port_disable_rt(dp); + + if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) + dev_uc_del(conduit, dev->dev_addr); + + if (dsa_switch_supports_uc_filtering(ds)) + dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); + + return 0; +} + +static void dsa_user_manage_host_flood(struct net_device *dev) +{ + bool mc = dev->flags & (IFF_PROMISC | IFF_ALLMULTI); + struct dsa_port *dp = dsa_user_to_port(dev); + bool uc = dev->flags & IFF_PROMISC; + + dsa_port_set_host_flood(dp, uc, mc); +} + +static void dsa_user_change_rx_flags(struct net_device *dev, int change) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (change & IFF_ALLMULTI) + dev_set_allmulti(conduit, + dev->flags & IFF_ALLMULTI ? 1 : -1); + if (change & IFF_PROMISC) + dev_set_promiscuity(conduit, + dev->flags & IFF_PROMISC ? 1 : -1); + + if (dsa_switch_supports_uc_filtering(ds) && + dsa_switch_supports_mc_filtering(ds)) + dsa_user_manage_host_flood(dev); +} + +static void dsa_user_set_rx_mode(struct net_device *dev) +{ + __dev_mc_sync(dev, dsa_user_sync_mc, dsa_user_unsync_mc); + __dev_uc_sync(dev, dsa_user_sync_uc, dsa_user_unsync_uc); +} + +static int dsa_user_set_mac_address(struct net_device *dev, void *a) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + struct sockaddr *addr = a; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ds->ops->port_set_mac_address) { + err = ds->ops->port_set_mac_address(ds, dp->index, + addr->sa_data); + if (err) + return err; + } + + /* If the port is down, the address isn't synced yet to hardware or + * to the DSA conduit, so there is nothing to change. 
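/*
 * Illustrative sketch (not part of the patch) of the error unwinding
 * in dsa_user_open() above: each successfully applied step has a
 * matching teardown, run in reverse order via goto labels when a later
 * step fails. The stub step names are ours.
 */
static int  host_fdb_add(void)    { return 0; }
static void host_fdb_del(void)    { }
static int  conduit_uc_add(void)  { return 0; }
static void conduit_uc_del(void)  { }
static int  port_enable(void)     { return 0; }

static int user_open_model(void)
{
	int err;

	err = host_fdb_add();		/* standalone host FDB entry */
	if (err)
		goto out;

	err = conduit_uc_add();		/* conduit RX filter, if MAC differs */
	if (err)
		goto del_host_addr;

	err = port_enable();		/* bring up the switch port */
	if (err)
		goto del_unicast;

	return 0;

del_unicast:
	conduit_uc_del();
del_host_addr:
	host_fdb_del();
out:
	return err;
}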
+ */ + if (!(dev->flags & IFF_UP)) + goto out_change_dev_addr; + + if (dsa_switch_supports_uc_filtering(ds)) { + err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0); + if (err) + return err; + } + + if (!ether_addr_equal(addr->sa_data, conduit->dev_addr)) { + err = dev_uc_add(conduit, addr->sa_data); + if (err < 0) + goto del_unicast; + } + + if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) + dev_uc_del(conduit, dev->dev_addr); + + if (dsa_switch_supports_uc_filtering(ds)) + dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); + +out_change_dev_addr: + eth_hw_addr_set(dev, addr->sa_data); + + return 0; + +del_unicast: + if (dsa_switch_supports_uc_filtering(ds)) + dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0); + + return err; +} + +struct dsa_user_dump_ctx { + struct net_device *dev; + struct sk_buff *skb; + struct netlink_callback *cb; + int idx; +}; + +static int +dsa_user_port_fdb_do_dump(const unsigned char *addr, u16 vid, + bool is_static, void *data) +{ + struct dsa_user_dump_ctx *dump = data; + u32 portid = NETLINK_CB(dump->cb->skb).portid; + u32 seq = dump->cb->nlh->nlmsg_seq; + struct nlmsghdr *nlh; + struct ndmsg *ndm; + + if (dump->idx < dump->cb->args[2]) + goto skip; + + nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH, + sizeof(*ndm), NLM_F_MULTI); + if (!nlh) + return -EMSGSIZE; + + ndm = nlmsg_data(nlh); + ndm->ndm_family = AF_BRIDGE; + ndm->ndm_pad1 = 0; + ndm->ndm_pad2 = 0; + ndm->ndm_flags = NTF_SELF; + ndm->ndm_type = 0; + ndm->ndm_ifindex = dump->dev->ifindex; + ndm->ndm_state = is_static ? NUD_NOARP : NUD_REACHABLE; + + if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, addr)) + goto nla_put_failure; + + if (vid && nla_put_u16(dump->skb, NDA_VLAN, vid)) + goto nla_put_failure; + + nlmsg_end(dump->skb, nlh); + +skip: + dump->idx++; + return 0; + +nla_put_failure: + nlmsg_cancel(dump->skb, nlh); + return -EMSGSIZE; +} + +static int +dsa_user_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, + struct net_device *dev, struct net_device *filter_dev, + int *idx) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_user_dump_ctx dump = { + .dev = dev, + .skb = skb, + .cb = cb, + .idx = *idx, + }; + int err; + + err = dsa_port_fdb_dump(dp, dsa_user_port_fdb_do_dump, &dump); + *idx = dump.idx; + + return err; +} + +static int dsa_user_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct dsa_user_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->dp->ds; + int port = p->dp->index; + + /* Pass through to switch driver if it supports timestamping */ + switch (cmd) { + case SIOCGHWTSTAMP: + if (ds->ops->port_hwtstamp_get) + return ds->ops->port_hwtstamp_get(ds, port, ifr); + break; + case SIOCSHWTSTAMP: + if (ds->ops->port_hwtstamp_set) + return ds->ops->port_hwtstamp_set(ds, port, ifr); + break; + } + + return phylink_mii_ioctl(p->dp->pl, ifr, cmd); +} + +static int dsa_user_port_attr_set(struct net_device *dev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + int ret; + + if (ctx && ctx != dp) + return 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev)) + return -EOPNOTSUPP; + + ret = dsa_port_set_state(dp, attr->u.stp_state, true); + break; + case SWITCHDEV_ATTR_ID_PORT_MST_STATE: + if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev)) + return -EOPNOTSUPP; + + ret = dsa_port_set_mst_state(dp, &attr->u.mst_state, extack); + break; + case 
SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: + if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) + return -EOPNOTSUPP; + + ret = dsa_port_vlan_filtering(dp, attr->u.vlan_filtering, + extack); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) + return -EOPNOTSUPP; + + ret = dsa_port_ageing_time(dp, attr->u.ageing_time); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_MST: + if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) + return -EOPNOTSUPP; + + ret = dsa_port_mst_enable(dp, attr->u.mst, extack); + break; + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev)) + return -EOPNOTSUPP; + + ret = dsa_port_pre_bridge_flags(dp, attr->u.brport_flags, + extack); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + if (!dsa_port_offloads_bridge_port(dp, attr->orig_dev)) + return -EOPNOTSUPP; + + ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack); + break; + case SWITCHDEV_ATTR_ID_VLAN_MSTI: + if (!dsa_port_offloads_bridge_dev(dp, attr->orig_dev)) + return -EOPNOTSUPP; + + ret = dsa_port_vlan_msti(dp, &attr->u.vlan_msti); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +/* Must be called under rcu_read_lock() */ +static int +dsa_user_vlan_check_for_8021q_uppers(struct net_device *user, + const struct switchdev_obj_port_vlan *vlan) +{ + struct net_device *upper_dev; + struct list_head *iter; + + netdev_for_each_upper_dev_rcu(user, upper_dev, iter) { + u16 vid; + + if (!is_vlan_dev(upper_dev)) + continue; + + vid = vlan_dev_vlan_id(upper_dev); + if (vid == vlan->vid) + return -EBUSY; + } + + return 0; +} + +static int dsa_user_vlan_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct switchdev_obj_port_vlan *vlan; + int err; + + if (dsa_port_skip_vlan_configuration(dp)) { + NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN"); + return 0; + } + + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + + /* Deny adding a bridge VLAN when there is already an 802.1Q upper with + * the same VID. + */ + if (br_vlan_enabled(dsa_port_bridge_dev_get(dp))) { + rcu_read_lock(); + err = dsa_user_vlan_check_for_8021q_uppers(dev, vlan); + rcu_read_unlock(); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Port already has a VLAN upper with this VID"); + return err; + } + } + + return dsa_port_vlan_add(dp, vlan, extack); +} + +/* Offload a VLAN installed on the bridge or on a foreign interface by + * installing it as a VLAN towards the CPU port. + */ +static int dsa_user_host_vlan_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct switchdev_obj_port_vlan vlan; + + /* Do nothing if this is a software bridge */ + if (!dp->bridge) + return -EOPNOTSUPP; + + if (dsa_port_skip_vlan_configuration(dp)) { + NL_SET_ERR_MSG_MOD(extack, "skipping configuration of VLAN"); + return 0; + } + + vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj); + + /* Even though drivers often handle CPU membership in special ways, + * it doesn't make sense to program a PVID, so clear this flag. 
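/*
 * Illustrative sketch (not part of the patch): what clearing
 * BRIDGE_VLAN_INFO_PVID before programming the CPU port amounts to.
 * The flag values are the real bridge UAPI ones from
 * include/uapi/linux/if_bridge.h; the helper is ours.
 */
#include <stdint.h>

#define BRIDGE_VLAN_INFO_PVID		(1 << 1)
#define BRIDGE_VLAN_INFO_UNTAGGED	(1 << 2)

static uint16_t host_vlan_flags(uint16_t user_port_flags)
{
	/* CPU port membership keeps "untagged" etc., but never PVID. */
	return user_port_flags & ~BRIDGE_VLAN_INFO_PVID;
}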
+ */ + vlan.flags &= ~BRIDGE_VLAN_INFO_PVID; + + return dsa_port_host_vlan_add(dp, &vlan, extack); +} + +static int dsa_user_port_obj_add(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + int err; + + if (ctx && ctx != dp) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_MDB: + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev)) + return -EOPNOTSUPP; + + err = dsa_port_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + case SWITCHDEV_OBJ_ID_HOST_MDB: + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) + return -EOPNOTSUPP; + + err = dsa_port_bridge_host_mdb_add(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_VLAN: + if (dsa_port_offloads_bridge_port(dp, obj->orig_dev)) + err = dsa_user_vlan_add(dev, obj, extack); + else + err = dsa_user_host_vlan_add(dev, obj, extack); + break; + case SWITCHDEV_OBJ_ID_MRP: + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) + return -EOPNOTSUPP; + + err = dsa_port_mrp_add(dp, SWITCHDEV_OBJ_MRP(obj)); + break; + case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) + return -EOPNOTSUPP; + + err = dsa_port_mrp_add_ring_role(dp, + SWITCHDEV_OBJ_RING_ROLE_MRP(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int dsa_user_vlan_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct switchdev_obj_port_vlan *vlan; + + if (dsa_port_skip_vlan_configuration(dp)) + return 0; + + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + + return dsa_port_vlan_del(dp, vlan); +} + +static int dsa_user_host_vlan_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct switchdev_obj_port_vlan *vlan; + + /* Do nothing if this is a software bridge */ + if (!dp->bridge) + return -EOPNOTSUPP; + + if (dsa_port_skip_vlan_configuration(dp)) + return 0; + + vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + + return dsa_port_host_vlan_del(dp, vlan); +} + +static int dsa_user_port_obj_del(struct net_device *dev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + int err; + + if (ctx && ctx != dp) + return 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_MDB: + if (!dsa_port_offloads_bridge_port(dp, obj->orig_dev)) + return -EOPNOTSUPP; + + err = dsa_port_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + case SWITCHDEV_OBJ_ID_HOST_MDB: + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) + return -EOPNOTSUPP; + + err = dsa_port_bridge_host_mdb_del(dp, SWITCHDEV_OBJ_PORT_MDB(obj)); + break; + case SWITCHDEV_OBJ_ID_PORT_VLAN: + if (dsa_port_offloads_bridge_port(dp, obj->orig_dev)) + err = dsa_user_vlan_del(dev, obj); + else + err = dsa_user_host_vlan_del(dev, obj); + break; + case SWITCHDEV_OBJ_ID_MRP: + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) + return -EOPNOTSUPP; + + err = dsa_port_mrp_del(dp, SWITCHDEV_OBJ_MRP(obj)); + break; + case SWITCHDEV_OBJ_ID_RING_ROLE_MRP: + if (!dsa_port_offloads_bridge_dev(dp, obj->orig_dev)) + return -EOPNOTSUPP; + + err = dsa_port_mrp_del_ring_role(dp, + SWITCHDEV_OBJ_RING_ROLE_MRP(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static inline netdev_tx_t dsa_user_netpoll_send_skb(struct net_device *dev, + struct sk_buff *skb) +{ +#ifdef CONFIG_NET_POLL_CONTROLLER + struct dsa_user_priv *p = 
netdev_priv(dev); + + return netpoll_send_skb(p->netpoll, skb); +#else + BUG(); + return NETDEV_TX_OK; +#endif +} + +static void dsa_skb_tx_timestamp(struct dsa_user_priv *p, + struct sk_buff *skb) +{ + struct dsa_switch *ds = p->dp->ds; + + if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + return; + + if (!ds->ops->port_txtstamp) + return; + + ds->ops->port_txtstamp(ds, p->dp->index, skb); +} + +netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev) +{ + /* SKB for netpoll still need to be mangled with the protocol-specific + * tag to be successfully transmitted + */ + if (unlikely(netpoll_tx_running(dev))) + return dsa_user_netpoll_send_skb(dev, skb); + + /* Queue the SKB for transmission on the parent interface, but + * do not modify its EtherType + */ + skb->dev = dsa_user_to_conduit(dev); + dev_queue_xmit(skb); + + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(dsa_enqueue_skb); + +static int dsa_realloc_skb(struct sk_buff *skb, struct net_device *dev) +{ + int needed_headroom = dev->needed_headroom; + int needed_tailroom = dev->needed_tailroom; + + /* For tail taggers, we need to pad short frames ourselves, to ensure + * that the tail tag does not fail at its role of being at the end of + * the packet, once the conduit interface pads the frame. Account for + * that pad length here, and pad later. + */ + if (unlikely(needed_tailroom && skb->len < ETH_ZLEN)) + needed_tailroom += ETH_ZLEN - skb->len; + /* skb_headroom() returns unsigned int... */ + needed_headroom = max_t(int, needed_headroom - skb_headroom(skb), 0); + needed_tailroom = max_t(int, needed_tailroom - skb_tailroom(skb), 0); + + if (likely(!needed_headroom && !needed_tailroom && !skb_cloned(skb))) + /* No reallocation needed, yay! */ + return 0; + + return pskb_expand_head(skb, needed_headroom, needed_tailroom, + GFP_ATOMIC); +} + +static netdev_tx_t dsa_user_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct dsa_user_priv *p = netdev_priv(dev); + struct sk_buff *nskb; + + dev_sw_netstats_tx_add(dev, 1, skb->len); + + memset(skb->cb, 0, sizeof(skb->cb)); + + /* Handle tx timestamp if any */ + dsa_skb_tx_timestamp(p, skb); + + if (dsa_realloc_skb(skb, dev)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* needed_tailroom should still be 'warm' in the cache line from + * dsa_realloc_skb(), which has also ensured that padding is safe. + */ + if (dev->needed_tailroom) + eth_skb_pad(skb); + + /* Transmit function may have to reallocate the original SKB, + * in which case it must have freed it. Only free it here on error. 
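/*
 * Illustrative sketch (not part of the patch): the room computation in
 * dsa_realloc_skb() above, with a worked example. For a 1-byte tail
 * tag and a 42-byte frame, the conduit would pad to 60 bytes *after*
 * the tag was appended, pushing the tag away from the end of the
 * packet; so the tagger reserves 1 + (60 - 42) = 19 extra tail bytes
 * and pads first. The userspace form and names are ours.
 */
#include <stdio.h>

#define ETH_ZLEN 60

static void dsa_room_needed(int skb_len, int dev_headroom, int dev_tailroom,
			    int cur_headroom, int cur_tailroom,
			    int *need_head, int *need_tail)
{
	int tail = dev_tailroom;

	/* Tail taggers must pad short frames themselves. */
	if (tail && skb_len < ETH_ZLEN)
		tail += ETH_ZLEN - skb_len;

	*need_head = dev_headroom > cur_headroom ?
		     dev_headroom - cur_headroom : 0;
	*need_tail = tail > cur_tailroom ? tail - cur_tailroom : 0;
}

int main(void)
{
	int head, tail;

	dsa_room_needed(42, 0, 1, 0, 0, &head, &tail);
	printf("expand head by %d, tail by %d\n", head, tail); /* 0 and 19 */
	return 0;
}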
+ */ + nskb = p->xmit(skb, dev); + if (!nskb) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + + return dsa_enqueue_skb(nskb, dev); +} + +/* ethtool operations *******************************************************/ + +static void dsa_user_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strscpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver)); + strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); +} + +static int dsa_user_get_regs_len(struct net_device *dev) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_regs_len) + return ds->ops->get_regs_len(ds, dp->index); + + return -EOPNOTSUPP; +} + +static void +dsa_user_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_regs) + ds->ops->get_regs(ds, dp->index, regs, _p); +} + +static int dsa_user_nway_reset(struct net_device *dev) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + + return phylink_ethtool_nway_reset(dp->pl); +} + +static int dsa_user_get_eeprom_len(struct net_device *dev) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->cd && ds->cd->eeprom_len) + return ds->cd->eeprom_len; + + if (ds->ops->get_eeprom_len) + return ds->ops->get_eeprom_len(ds); + + return 0; +} + +static int dsa_user_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_eeprom) + return ds->ops->get_eeprom(ds, eeprom, data); + + return -EOPNOTSUPP; +} + +static int dsa_user_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->set_eeprom) + return ds->ops->set_eeprom(ds, eeprom, data); + + return -EOPNOTSUPP; +} + +static void dsa_user_get_strings(struct net_device *dev, + uint32_t stringset, uint8_t *data) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (stringset == ETH_SS_STATS) { + int len = ETH_GSTRING_LEN; + + strscpy_pad(data, "tx_packets", len); + strscpy_pad(data + len, "tx_bytes", len); + strscpy_pad(data + 2 * len, "rx_packets", len); + strscpy_pad(data + 3 * len, "rx_bytes", len); + if (ds->ops->get_strings) + ds->ops->get_strings(ds, dp->index, stringset, + data + 4 * len); + } else if (stringset == ETH_SS_TEST) { + net_selftest_get_strings(data); + } + +} + +static void dsa_user_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + uint64_t *data) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + struct pcpu_sw_netstats *s; + unsigned int start; + int i; + + for_each_possible_cpu(i) { + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; + + s = per_cpu_ptr(dev->tstats, i); + do { + start = u64_stats_fetch_begin(&s->syncp); + tx_packets = u64_stats_read(&s->tx_packets); + tx_bytes = u64_stats_read(&s->tx_bytes); + rx_packets = u64_stats_read(&s->rx_packets); + rx_bytes = u64_stats_read(&s->rx_bytes); + } while (u64_stats_fetch_retry(&s->syncp, start)); + data[0] += tx_packets; + data[1] += tx_bytes; + data[2] += rx_packets; + data[3] += rx_bytes; + } + if (ds->ops->get_ethtool_stats) + ds->ops->get_ethtool_stats(ds, dp->index, data + 4); +} + +static int 
dsa_user_get_sset_count(struct net_device *dev, int sset) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (sset == ETH_SS_STATS) { + int count = 0; + + if (ds->ops->get_sset_count) { + count = ds->ops->get_sset_count(ds, dp->index, sset); + if (count < 0) + return count; + } + + return count + 4; + } else if (sset == ETH_SS_TEST) { + return net_selftest_get_count(); + } + + return -EOPNOTSUPP; +} + +static void dsa_user_get_eth_phy_stats(struct net_device *dev, + struct ethtool_eth_phy_stats *phy_stats) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_eth_phy_stats) + ds->ops->get_eth_phy_stats(ds, dp->index, phy_stats); +} + +static void dsa_user_get_eth_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_eth_mac_stats) + ds->ops->get_eth_mac_stats(ds, dp->index, mac_stats); +} + +static void +dsa_user_get_eth_ctrl_stats(struct net_device *dev, + struct ethtool_eth_ctrl_stats *ctrl_stats) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_eth_ctrl_stats) + ds->ops->get_eth_ctrl_stats(ds, dp->index, ctrl_stats); +} + +static void +dsa_user_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *rmon_stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_rmon_stats) + ds->ops->get_rmon_stats(ds, dp->index, rmon_stats, ranges); +} + +static void dsa_user_net_selftest(struct net_device *ndev, + struct ethtool_test *etest, u64 *buf) +{ + struct dsa_port *dp = dsa_user_to_port(ndev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->self_test) { + ds->ops->self_test(ds, dp->index, etest, buf); + return; + } + + net_selftest(ndev, etest, buf); +} + +static int dsa_user_get_mm(struct net_device *dev, + struct ethtool_mm_state *state) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (!ds->ops->get_mm) + return -EOPNOTSUPP; + + return ds->ops->get_mm(ds, dp->index, state); +} + +static int dsa_user_set_mm(struct net_device *dev, struct ethtool_mm_cfg *cfg, + struct netlink_ext_ack *extack) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (!ds->ops->set_mm) + return -EOPNOTSUPP; + + return ds->ops->set_mm(ds, dp->index, cfg, extack); +} + +static void dsa_user_get_mm_stats(struct net_device *dev, + struct ethtool_mm_stats *stats) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_mm_stats) + ds->ops->get_mm_stats(ds, dp->index, stats); +} + +static void dsa_user_get_wol(struct net_device *dev, struct ethtool_wolinfo *w) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + phylink_ethtool_get_wol(dp->pl, w); + + if (ds->ops->get_wol) + ds->ops->get_wol(ds, dp->index, w); +} + +static int dsa_user_set_wol(struct net_device *dev, struct ethtool_wolinfo *w) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + int ret = -EOPNOTSUPP; + + phylink_ethtool_set_wol(dp->pl, w); + + if (ds->ops->set_wol) + ret = ds->ops->set_wol(ds, dp->index, w); + + return ret; +} + +static int dsa_user_set_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds 
= dp->ds; + int ret; + + /* Port's PHY and MAC both need to be EEE capable */ + if (!dev->phydev || !dp->pl) + return -ENODEV; + + if (!ds->ops->set_mac_eee) + return -EOPNOTSUPP; + + ret = ds->ops->set_mac_eee(ds, dp->index, e); + if (ret) + return ret; + + return phylink_ethtool_set_eee(dp->pl, e); +} + +static int dsa_user_get_eee(struct net_device *dev, struct ethtool_eee *e) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + int ret; + + /* Port's PHY and MAC both need to be EEE capable */ + if (!dev->phydev || !dp->pl) + return -ENODEV; + + if (!ds->ops->get_mac_eee) + return -EOPNOTSUPP; + + ret = ds->ops->get_mac_eee(ds, dp->index, e); + if (ret) + return ret; + + return phylink_ethtool_get_eee(dp->pl, e); +} + +static int dsa_user_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + + return phylink_ethtool_ksettings_get(dp->pl, cmd); +} + +static int dsa_user_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + + return phylink_ethtool_ksettings_set(dp->pl, cmd); +} + +static void dsa_user_get_pause_stats(struct net_device *dev, + struct ethtool_pause_stats *pause_stats) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_pause_stats) + ds->ops->get_pause_stats(ds, dp->index, pause_stats); +} + +static void dsa_user_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + + phylink_ethtool_get_pauseparam(dp->pl, pause); +} + +static int dsa_user_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *pause) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + + return phylink_ethtool_set_pauseparam(dp->pl, pause); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static int dsa_user_netpoll_setup(struct net_device *dev, + struct netpoll_info *ni) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_user_priv *p = netdev_priv(dev); + struct netpoll *netpoll; + int err = 0; + + netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); + if (!netpoll) + return -ENOMEM; + + err = __netpoll_setup(netpoll, conduit); + if (err) { + kfree(netpoll); + goto out; + } + + p->netpoll = netpoll; +out: + return err; +} + +static void dsa_user_netpoll_cleanup(struct net_device *dev) +{ + struct dsa_user_priv *p = netdev_priv(dev); + struct netpoll *netpoll = p->netpoll; + + if (!netpoll) + return; + + p->netpoll = NULL; + + __netpoll_free(netpoll); +} + +static void dsa_user_poll_controller(struct net_device *dev) +{ +} +#endif + +static struct dsa_mall_tc_entry * +dsa_user_mall_tc_entry_find(struct net_device *dev, unsigned long cookie) +{ + struct dsa_user_priv *p = netdev_priv(dev); + struct dsa_mall_tc_entry *mall_tc_entry; + + list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) + if (mall_tc_entry->cookie == cookie) + return mall_tc_entry; + + return NULL; +} + +static int +dsa_user_add_cls_matchall_mirred(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_user_priv *p = netdev_priv(dev); + struct dsa_mall_mirror_tc_entry *mirror; + struct dsa_mall_tc_entry *mall_tc_entry; + struct dsa_switch *ds = dp->ds; + struct flow_action_entry *act; + struct dsa_port *to_dp; + int err; + + if (!ds->ops->port_mirror_add) 
+ return -EOPNOTSUPP; + + if (!flow_action_basic_hw_stats_check(&cls->rule->action, + cls->common.extack)) + return -EOPNOTSUPP; + + act = &cls->rule->action.entries[0]; + + if (!act->dev) + return -EINVAL; + + if (!dsa_user_dev_check(act->dev)) + return -EOPNOTSUPP; + + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + + mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->type = DSA_PORT_MALL_MIRROR; + mirror = &mall_tc_entry->mirror; + + to_dp = dsa_user_to_port(act->dev); + + mirror->to_local_port = to_dp->index; + mirror->ingress = ingress; + + err = ds->ops->port_mirror_add(ds, dp->index, mirror, ingress, extack); + if (err) { + kfree(mall_tc_entry); + return err; + } + + list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); + + return err; +} + +static int +dsa_user_add_cls_matchall_police(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_user_priv *p = netdev_priv(dev); + struct dsa_mall_policer_tc_entry *policer; + struct dsa_mall_tc_entry *mall_tc_entry; + struct dsa_switch *ds = dp->ds; + struct flow_action_entry *act; + int err; + + if (!ds->ops->port_policer_add) { + NL_SET_ERR_MSG_MOD(extack, + "Policing offload not implemented"); + return -EOPNOTSUPP; + } + + if (!ingress) { + NL_SET_ERR_MSG_MOD(extack, + "Only supported on ingress qdisc"); + return -EOPNOTSUPP; + } + + if (!flow_action_basic_hw_stats_check(&cls->rule->action, + cls->common.extack)) + return -EOPNOTSUPP; + + list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list) { + if (mall_tc_entry->type == DSA_PORT_MALL_POLICER) { + NL_SET_ERR_MSG_MOD(extack, + "Only one port policer allowed"); + return -EEXIST; + } + } + + act = &cls->rule->action.entries[0]; + + mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL); + if (!mall_tc_entry) + return -ENOMEM; + + mall_tc_entry->cookie = cls->cookie; + mall_tc_entry->type = DSA_PORT_MALL_POLICER; + policer = &mall_tc_entry->policer; + policer->rate_bytes_per_sec = act->police.rate_bytes_ps; + policer->burst = act->police.burst; + + err = ds->ops->port_policer_add(ds, dp->index, policer); + if (err) { + kfree(mall_tc_entry); + return err; + } + + list_add_tail(&mall_tc_entry->list, &p->mall_tc_list); + + return err; +} + +static int dsa_user_add_cls_matchall(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + int err = -EOPNOTSUPP; + + if (cls->common.protocol == htons(ETH_P_ALL) && + flow_offload_has_one_action(&cls->rule->action) && + cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED) + err = dsa_user_add_cls_matchall_mirred(dev, cls, ingress); + else if (flow_offload_has_one_action(&cls->rule->action) && + cls->rule->action.entries[0].id == FLOW_ACTION_POLICE) + err = dsa_user_add_cls_matchall_police(dev, cls, ingress); + + return err; +} + +static void dsa_user_del_cls_matchall(struct net_device *dev, + struct tc_cls_matchall_offload *cls) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_mall_tc_entry *mall_tc_entry; + struct dsa_switch *ds = dp->ds; + + mall_tc_entry = dsa_user_mall_tc_entry_find(dev, cls->cookie); + if (!mall_tc_entry) + return; + + list_del(&mall_tc_entry->list); + + switch (mall_tc_entry->type) { + case DSA_PORT_MALL_MIRROR: + if (ds->ops->port_mirror_del) + ds->ops->port_mirror_del(ds, dp->index, + &mall_tc_entry->mirror); + break; + case DSA_PORT_MALL_POLICER: + if (ds->ops->port_policer_del) 
+ ds->ops->port_policer_del(ds, dp->index); + break; + default: + WARN_ON(1); + } + + kfree(mall_tc_entry); +} + +static int dsa_user_setup_tc_cls_matchall(struct net_device *dev, + struct tc_cls_matchall_offload *cls, + bool ingress) +{ + if (cls->common.chain_index) + return -EOPNOTSUPP; + + switch (cls->command) { + case TC_CLSMATCHALL_REPLACE: + return dsa_user_add_cls_matchall(dev, cls, ingress); + case TC_CLSMATCHALL_DESTROY: + dsa_user_del_cls_matchall(dev, cls); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int dsa_user_add_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_add) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_add(ds, port, cls, ingress); +} + +static int dsa_user_del_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_del) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_del(ds, port, cls, ingress); +} + +static int dsa_user_stats_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + + if (!ds->ops->cls_flower_stats) + return -EOPNOTSUPP; + + return ds->ops->cls_flower_stats(ds, port, cls, ingress); +} + +static int dsa_user_setup_tc_cls_flower(struct net_device *dev, + struct flow_cls_offload *cls, + bool ingress) +{ + switch (cls->command) { + case FLOW_CLS_REPLACE: + return dsa_user_add_cls_flower(dev, cls, ingress); + case FLOW_CLS_DESTROY: + return dsa_user_del_cls_flower(dev, cls, ingress); + case FLOW_CLS_STATS: + return dsa_user_stats_cls_flower(dev, cls, ingress); + default: + return -EOPNOTSUPP; + } +} + +static int dsa_user_setup_tc_block_cb(enum tc_setup_type type, void *type_data, + void *cb_priv, bool ingress) +{ + struct net_device *dev = cb_priv; + + if (!tc_can_offload(dev)) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSMATCHALL: + return dsa_user_setup_tc_cls_matchall(dev, type_data, ingress); + case TC_SETUP_CLSFLOWER: + return dsa_user_setup_tc_cls_flower(dev, type_data, ingress); + default: + return -EOPNOTSUPP; + } +} + +static int dsa_user_setup_tc_block_cb_ig(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, true); +} + +static int dsa_user_setup_tc_block_cb_eg(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + return dsa_user_setup_tc_block_cb(type, type_data, cb_priv, false); +} + +static LIST_HEAD(dsa_user_block_cb_list); + +static int dsa_user_setup_tc_block(struct net_device *dev, + struct flow_block_offload *f) +{ + struct flow_block_cb *block_cb; + flow_setup_cb_t *cb; + + if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + cb = dsa_user_setup_tc_block_cb_ig; + else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) + cb = dsa_user_setup_tc_block_cb_eg; + else + return -EOPNOTSUPP; + + f->driver_block_list = &dsa_user_block_cb_list; + + switch (f->command) { + case FLOW_BLOCK_BIND: + if (flow_block_cb_is_busy(cb, dev, &dsa_user_block_cb_list)) + return -EBUSY; + + block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + flow_block_cb_add(block_cb, f); + 
list_add_tail(&block_cb->driver_list, &dsa_user_block_cb_list); + return 0; + case FLOW_BLOCK_UNBIND: + block_cb = flow_block_cb_lookup(f->block, cb, dev); + if (!block_cb) + return -ENOENT; + + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int dsa_user_setup_ft_block(struct dsa_switch *ds, int port, + void *type_data) +{ + struct net_device *conduit = dsa_port_to_conduit(dsa_to_port(ds, port)); + + if (!conduit->netdev_ops->ndo_setup_tc) + return -EOPNOTSUPP; + + return conduit->netdev_ops->ndo_setup_tc(conduit, TC_SETUP_FT, type_data); +} + +static int dsa_user_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + switch (type) { + case TC_SETUP_BLOCK: + return dsa_user_setup_tc_block(dev, type_data); + case TC_SETUP_FT: + return dsa_user_setup_ft_block(ds, dp->index, type_data); + default: + break; + } + + if (!ds->ops->port_setup_tc) + return -EOPNOTSUPP; + + return ds->ops->port_setup_tc(ds, dp->index, type, type_data); +} + +static int dsa_user_get_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc, u32 *rule_locs) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (!ds->ops->get_rxnfc) + return -EOPNOTSUPP; + + return ds->ops->get_rxnfc(ds, dp->index, nfc, rule_locs); +} + +static int dsa_user_set_rxnfc(struct net_device *dev, + struct ethtool_rxnfc *nfc) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (!ds->ops->set_rxnfc) + return -EOPNOTSUPP; + + return ds->ops->set_rxnfc(ds, dp->index, nfc); +} + +static int dsa_user_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *ts) +{ + struct dsa_user_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->dp->ds; + + if (!ds->ops->get_ts_info) + return -EOPNOTSUPP; + + return ds->ops->get_ts_info(ds, p->dp->index, ts); +} + +static int dsa_user_vlan_rx_add_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct switchdev_obj_port_vlan vlan = { + .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN, + .vid = vid, + /* This API only allows programming tagged, non-PVID VIDs */ + .flags = 0, + }; + struct netlink_ext_ack extack = {0}; + struct dsa_switch *ds = dp->ds; + struct netdev_hw_addr *ha; + struct dsa_vlan *v; + int ret; + + /* User port... */ + ret = dsa_port_vlan_add(dp, &vlan, &extack); + if (ret) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + return ret; + } + + /* And CPU port... 
*/ + ret = dsa_port_host_vlan_add(dp, &vlan, &extack); + if (ret) { + if (extack._msg) + netdev_err(dev, "CPU port %d: %s\n", dp->cpu_dp->index, + extack._msg); + return ret; + } + + if (!dsa_switch_supports_uc_filtering(ds) && + !dsa_switch_supports_mc_filtering(ds)) + return 0; + + v = kzalloc(sizeof(*v), GFP_KERNEL); + if (!v) { + ret = -ENOMEM; + goto rollback; + } + + netif_addr_lock_bh(dev); + + v->vid = vid; + list_add_tail(&v->list, &dp->user_vlans); + + if (dsa_switch_supports_mc_filtering(ds)) { + netdev_for_each_synced_mc_addr(ha, dev) { + dsa_user_schedule_standalone_work(dev, DSA_MC_ADD, + ha->addr, vid); + } + } + + if (dsa_switch_supports_uc_filtering(ds)) { + netdev_for_each_synced_uc_addr(ha, dev) { + dsa_user_schedule_standalone_work(dev, DSA_UC_ADD, + ha->addr, vid); + } + } + + netif_addr_unlock_bh(dev); + + dsa_flush_workqueue(); + + return 0; + +rollback: + dsa_port_host_vlan_del(dp, &vlan); + dsa_port_vlan_del(dp, &vlan); + + return ret; +} + +static int dsa_user_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, + u16 vid) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct switchdev_obj_port_vlan vlan = { + .vid = vid, + /* This API only allows programming tagged, non-PVID VIDs */ + .flags = 0, + }; + struct dsa_switch *ds = dp->ds; + struct netdev_hw_addr *ha; + struct dsa_vlan *v; + int err; + + err = dsa_port_vlan_del(dp, &vlan); + if (err) + return err; + + err = dsa_port_host_vlan_del(dp, &vlan); + if (err) + return err; + + if (!dsa_switch_supports_uc_filtering(ds) && + !dsa_switch_supports_mc_filtering(ds)) + return 0; + + netif_addr_lock_bh(dev); + + v = dsa_vlan_find(&dp->user_vlans, &vlan); + if (!v) { + netif_addr_unlock_bh(dev); + return -ENOENT; + } + + list_del(&v->list); + kfree(v); + + if (dsa_switch_supports_mc_filtering(ds)) { + netdev_for_each_synced_mc_addr(ha, dev) { + dsa_user_schedule_standalone_work(dev, DSA_MC_DEL, + ha->addr, vid); + } + } + + if (dsa_switch_supports_uc_filtering(ds)) { + netdev_for_each_synced_uc_addr(ha, dev) { + dsa_user_schedule_standalone_work(dev, DSA_UC_DEL, + ha->addr, vid); + } + } + + netif_addr_unlock_bh(dev); + + dsa_flush_workqueue(); + + return 0; +} + +static int dsa_user_restore_vlan(struct net_device *vdev, int vid, void *arg) +{ + __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q); + + return dsa_user_vlan_rx_add_vid(arg, proto, vid); +} + +static int dsa_user_clear_vlan(struct net_device *vdev, int vid, void *arg) +{ + __be16 proto = vdev ? vlan_dev_vlan_proto(vdev) : htons(ETH_P_8021Q); + + return dsa_user_vlan_rx_kill_vid(arg, proto, vid); +} + +/* Keep the VLAN RX filtering list in sync with the hardware only if VLAN + * filtering is enabled. The baseline is that only ports that offload a + * VLAN-aware bridge are VLAN-aware, and standalone ports are VLAN-unaware, + * but there are exceptions for quirky hardware. + * + * If ds->vlan_filtering_is_global = true, then standalone ports which share + * the same switch with other ports that offload a VLAN-aware bridge are also + * inevitably VLAN-aware. 
+ * + * To summarize, a DSA switch port offloads: + * + * - If standalone (this includes software bridge, software LAG): + * - if ds->needs_standalone_vlan_filtering = true, OR if + * (ds->vlan_filtering_is_global = true AND there are bridges spanning + * this switch chip which have vlan_filtering=1) + * - the 8021q upper VLANs + * - else (standalone VLAN filtering is not needed, VLAN filtering is not + * global, or it is, but no port is under a VLAN-aware bridge): + * - no VLAN (any 8021q upper is a software VLAN) + * + * - If under a vlan_filtering=0 bridge which it offloads: + * - if ds->configure_vlan_while_not_filtering = true (default): + * - the bridge VLANs. These VLANs are committed to hardware but inactive. + * - else (deprecated): + * - no VLAN. The bridge VLANs are not restored when VLAN awareness is + * enabled, so this behavior is broken and discouraged. + * + * - If under a vlan_filtering=1 bridge which it offloads: + * - the bridge VLANs + * - the 8021q upper VLANs + */ +int dsa_user_manage_vlan_filtering(struct net_device *user, + bool vlan_filtering) +{ + int err; + + if (vlan_filtering) { + user->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + err = vlan_for_each(user, dsa_user_restore_vlan, user); + if (err) { + vlan_for_each(user, dsa_user_clear_vlan, user); + user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + return err; + } + } else { + err = vlan_for_each(user, dsa_user_clear_vlan, user); + if (err) + return err; + + user->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + } + + return 0; +} + +struct dsa_hw_port { + struct list_head list; + struct net_device *dev; + int old_mtu; +}; + +static int dsa_hw_port_list_set_mtu(struct list_head *hw_port_list, int mtu) +{ + const struct dsa_hw_port *p; + int err; + + list_for_each_entry(p, hw_port_list, list) { + if (p->dev->mtu == mtu) + continue; + + err = dev_set_mtu(p->dev, mtu); + if (err) + goto rollback; + } + + return 0; + +rollback: + list_for_each_entry_continue_reverse(p, hw_port_list, list) { + if (p->dev->mtu == p->old_mtu) + continue; + + if (dev_set_mtu(p->dev, p->old_mtu)) + netdev_err(p->dev, "Failed to restore MTU\n"); + } + + return err; +} + +static void dsa_hw_port_list_free(struct list_head *hw_port_list) +{ + struct dsa_hw_port *p, *n; + + list_for_each_entry_safe(p, n, hw_port_list, list) + kfree(p); +} + +/* Make the hardware datapath to/from @dev limited to a common MTU */ +static void dsa_bridge_mtu_normalization(struct dsa_port *dp) +{ + struct list_head hw_port_list; + struct dsa_switch_tree *dst; + int min_mtu = ETH_MAX_MTU; + struct dsa_port *other_dp; + int err; + + if (!dp->ds->mtu_enforcement_ingress) + return; + + if (!dp->bridge) + return; + + INIT_LIST_HEAD(&hw_port_list); + + /* Populate the list of ports that are part of the same bridge + * as the newly added/modified port + */ + list_for_each_entry(dst, &dsa_tree_list, list) { + list_for_each_entry(other_dp, &dst->ports, list) { + struct dsa_hw_port *hw_port; + struct net_device *user; + + if (other_dp->type != DSA_PORT_TYPE_USER) + continue; + + if (!dsa_port_bridge_same(dp, other_dp)) + continue; + + if (!other_dp->ds->mtu_enforcement_ingress) + continue; + + user = other_dp->user; + + if (min_mtu > user->mtu) + min_mtu = user->mtu; + + hw_port = kzalloc(sizeof(*hw_port), GFP_KERNEL); + if (!hw_port) + goto out; + + hw_port->dev = user; + hw_port->old_mtu = user->mtu; + + list_add(&hw_port->list, &hw_port_list); + } + } + + /* Attempt to configure the entire hardware bridge to the newly added + * interface's MTU first, regardless of whether the
intention of the + * user was to raise or lower it. + */ + err = dsa_hw_port_list_set_mtu(&hw_port_list, dp->user->mtu); + if (!err) + goto out; + + /* Clearly that didn't work out so well, so just set the minimum MTU on + * all hardware bridge ports now. If this fails too, then all ports will + * still have their old MTU rolled back anyway. + */ + dsa_hw_port_list_set_mtu(&hw_port_list, min_mtu); + +out: + dsa_hw_port_list_free(&hw_port_list); +} + +int dsa_user_change_mtu(struct net_device *dev, int new_mtu) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_port *cpu_dp = dp->cpu_dp; + struct dsa_switch *ds = dp->ds; + struct dsa_port *other_dp; + int largest_mtu = 0; + int new_conduit_mtu; + int old_conduit_mtu; + int mtu_limit; + int overhead; + int cpu_mtu; + int err; + + if (!ds->ops->port_change_mtu) + return -EOPNOTSUPP; + + dsa_tree_for_each_user_port(other_dp, ds->dst) { + int user_mtu; + + /* During probe, this function will be called for each user + * device, while not all of them have been allocated. That's + * ok, it doesn't change what the maximum is, so ignore it. + */ + if (!other_dp->user) + continue; + + /* Pretend that we already applied the setting, which we + * actually haven't (still haven't done all integrity checks) + */ + if (dp == other_dp) + user_mtu = new_mtu; + else + user_mtu = other_dp->user->mtu; + + if (largest_mtu < user_mtu) + largest_mtu = user_mtu; + } + + overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops); + mtu_limit = min_t(int, conduit->max_mtu, dev->max_mtu + overhead); + old_conduit_mtu = conduit->mtu; + new_conduit_mtu = largest_mtu + overhead; + if (new_conduit_mtu > mtu_limit) + return -ERANGE; + + /* If the conduit MTU isn't over limit, there's no need to check the CPU + * MTU, since that surely isn't either. + */ + cpu_mtu = largest_mtu; + + /* Start applying stuff */ + if (new_conduit_mtu != old_conduit_mtu) { + err = dev_set_mtu(conduit, new_conduit_mtu); + if (err < 0) + goto out_conduit_failed; + + /* We only need to propagate the MTU of the CPU port to + * upstream switches, so emit a notifier which updates them. 
+ */ + err = dsa_port_mtu_change(cpu_dp, cpu_mtu); + if (err) + goto out_cpu_failed; + } + + err = ds->ops->port_change_mtu(ds, dp->index, new_mtu); + if (err) + goto out_port_failed; + + dev->mtu = new_mtu; + + dsa_bridge_mtu_normalization(dp); + + return 0; + +out_port_failed: + if (new_conduit_mtu != old_conduit_mtu) + dsa_port_mtu_change(cpu_dp, old_conduit_mtu - overhead); +out_cpu_failed: + if (new_conduit_mtu != old_conduit_mtu) + dev_set_mtu(conduit, old_conduit_mtu); +out_conduit_failed: + return err; +} + +static int __maybe_unused +dsa_user_dcbnl_set_default_prio(struct net_device *dev, struct dcb_app *app) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + unsigned long mask, new_prio; + int err, port = dp->index; + + if (!ds->ops->port_set_default_prio) + return -EOPNOTSUPP; + + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + mask = dcb_ieee_getapp_mask(dev, app); + new_prio = __fls(mask); + + err = ds->ops->port_set_default_prio(ds, port, new_prio); + if (err) { + dcb_ieee_delapp(dev, app); + return err; + } + + return 0; +} + +static int __maybe_unused +dsa_user_dcbnl_add_dscp_prio(struct net_device *dev, struct dcb_app *app) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + unsigned long mask, new_prio; + int err, port = dp->index; + u8 dscp = app->protocol; + + if (!ds->ops->port_add_dscp_prio) + return -EOPNOTSUPP; + + if (dscp >= 64) { + netdev_err(dev, "DSCP APP entry with protocol value %u is invalid\n", + dscp); + return -EINVAL; + } + + err = dcb_ieee_setapp(dev, app); + if (err) + return err; + + mask = dcb_ieee_getapp_mask(dev, app); + new_prio = __fls(mask); + + err = ds->ops->port_add_dscp_prio(ds, port, dscp, new_prio); + if (err) { + dcb_ieee_delapp(dev, app); + return err; + } + + return 0; +} + +static int __maybe_unused dsa_user_dcbnl_ieee_setapp(struct net_device *dev, + struct dcb_app *app) +{ + switch (app->selector) { + case IEEE_8021QAZ_APP_SEL_ETHERTYPE: + switch (app->protocol) { + case 0: + return dsa_user_dcbnl_set_default_prio(dev, app); + default: + return -EOPNOTSUPP; + } + break; + case IEEE_8021QAZ_APP_SEL_DSCP: + return dsa_user_dcbnl_add_dscp_prio(dev, app); + default: + return -EOPNOTSUPP; + } +} + +static int __maybe_unused +dsa_user_dcbnl_del_default_prio(struct net_device *dev, struct dcb_app *app) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + unsigned long mask, new_prio; + int err, port = dp->index; + + if (!ds->ops->port_set_default_prio) + return -EOPNOTSUPP; + + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + mask = dcb_ieee_getapp_mask(dev, app); + new_prio = mask ? 
__fls(mask) : 0; + + err = ds->ops->port_set_default_prio(ds, port, new_prio); + if (err) { + dcb_ieee_setapp(dev, app); + return err; + } + + return 0; +} + +static int __maybe_unused +dsa_user_dcbnl_del_dscp_prio(struct net_device *dev, struct dcb_app *app) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + int err, port = dp->index; + u8 dscp = app->protocol; + + if (!ds->ops->port_del_dscp_prio) + return -EOPNOTSUPP; + + err = dcb_ieee_delapp(dev, app); + if (err) + return err; + + err = ds->ops->port_del_dscp_prio(ds, port, dscp, app->priority); + if (err) { + dcb_ieee_setapp(dev, app); + return err; + } + + return 0; +} + +static int __maybe_unused dsa_user_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) +{ + switch (app->selector) { + case IEEE_8021QAZ_APP_SEL_ETHERTYPE: + switch (app->protocol) { + case 0: + return dsa_user_dcbnl_del_default_prio(dev, app); + default: + return -EOPNOTSUPP; + } + break; + case IEEE_8021QAZ_APP_SEL_DSCP: + return dsa_user_dcbnl_del_dscp_prio(dev, app); + default: + return -EOPNOTSUPP; + } +} + +/* Pre-populate the DCB application priority table with the priorities + * configured during switch setup, which we read from hardware here. + */ +static int dsa_user_dcbnl_init(struct net_device *dev) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + int port = dp->index; + int err; + + if (ds->ops->port_get_default_prio) { + int prio = ds->ops->port_get_default_prio(ds, port); + struct dcb_app app = { + .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE, + .protocol = 0, + .priority = prio, + }; + + if (prio < 0) + return prio; + + err = dcb_ieee_setapp(dev, &app); + if (err) + return err; + } + + if (ds->ops->port_get_dscp_prio) { + int protocol; + + for (protocol = 0; protocol < 64; protocol++) { + struct dcb_app app = { + .selector = IEEE_8021QAZ_APP_SEL_DSCP, + .protocol = protocol, + }; + int prio; + + prio = ds->ops->port_get_dscp_prio(ds, port, protocol); + if (prio == -EOPNOTSUPP) + continue; + if (prio < 0) + return prio; + + app.priority = prio; + + err = dcb_ieee_setapp(dev, &app); + if (err) + return err; + } + } + + return 0; +} + +static const struct ethtool_ops dsa_user_ethtool_ops = { + .get_drvinfo = dsa_user_get_drvinfo, + .get_regs_len = dsa_user_get_regs_len, + .get_regs = dsa_user_get_regs, + .nway_reset = dsa_user_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = dsa_user_get_eeprom_len, + .get_eeprom = dsa_user_get_eeprom, + .set_eeprom = dsa_user_set_eeprom, + .get_strings = dsa_user_get_strings, + .get_ethtool_stats = dsa_user_get_ethtool_stats, + .get_sset_count = dsa_user_get_sset_count, + .get_eth_phy_stats = dsa_user_get_eth_phy_stats, + .get_eth_mac_stats = dsa_user_get_eth_mac_stats, + .get_eth_ctrl_stats = dsa_user_get_eth_ctrl_stats, + .get_rmon_stats = dsa_user_get_rmon_stats, + .set_wol = dsa_user_set_wol, + .get_wol = dsa_user_get_wol, + .set_eee = dsa_user_set_eee, + .get_eee = dsa_user_get_eee, + .get_link_ksettings = dsa_user_get_link_ksettings, + .set_link_ksettings = dsa_user_set_link_ksettings, + .get_pause_stats = dsa_user_get_pause_stats, + .get_pauseparam = dsa_user_get_pauseparam, + .set_pauseparam = dsa_user_set_pauseparam, + .get_rxnfc = dsa_user_get_rxnfc, + .set_rxnfc = dsa_user_set_rxnfc, + .get_ts_info = dsa_user_get_ts_info, + .self_test = dsa_user_net_selftest, + .get_mm = dsa_user_get_mm, + .set_mm = dsa_user_set_mm, + .get_mm_stats = dsa_user_get_mm_stats, +}; + +static const struct dcbnl_rtnl_ops 
__maybe_unused dsa_user_dcbnl_ops = { + .ieee_setapp = dsa_user_dcbnl_ieee_setapp, + .ieee_delapp = dsa_user_dcbnl_ieee_delapp, +}; + +static void dsa_user_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (ds->ops->get_stats64) + ds->ops->get_stats64(ds, dp->index, s); + else + dev_get_tstats64(dev, s); +} + +static int dsa_user_fill_forward_path(struct net_device_path_ctx *ctx, + struct net_device_path *path) +{ + struct dsa_port *dp = dsa_user_to_port(ctx->dev); + struct net_device *conduit = dsa_port_to_conduit(dp); + struct dsa_port *cpu_dp = dp->cpu_dp; + + path->dev = ctx->dev; + path->type = DEV_PATH_DSA; + path->dsa.proto = cpu_dp->tag_ops->proto; + path->dsa.port = dp->index; + ctx->dev = conduit; + + return 0; +} + +static const struct net_device_ops dsa_user_netdev_ops = { + .ndo_open = dsa_user_open, + .ndo_stop = dsa_user_close, + .ndo_start_xmit = dsa_user_xmit, + .ndo_change_rx_flags = dsa_user_change_rx_flags, + .ndo_set_rx_mode = dsa_user_set_rx_mode, + .ndo_set_mac_address = dsa_user_set_mac_address, + .ndo_fdb_dump = dsa_user_fdb_dump, + .ndo_eth_ioctl = dsa_user_ioctl, + .ndo_get_iflink = dsa_user_get_iflink, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_netpoll_setup = dsa_user_netpoll_setup, + .ndo_netpoll_cleanup = dsa_user_netpoll_cleanup, + .ndo_poll_controller = dsa_user_poll_controller, +#endif + .ndo_setup_tc = dsa_user_setup_tc, + .ndo_get_stats64 = dsa_user_get_stats64, + .ndo_vlan_rx_add_vid = dsa_user_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = dsa_user_vlan_rx_kill_vid, + .ndo_change_mtu = dsa_user_change_mtu, + .ndo_fill_forward_path = dsa_user_fill_forward_path, +}; + +static struct device_type dsa_type = { + .name = "dsa", +}; + +void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up) +{ + const struct dsa_port *dp = dsa_to_port(ds, port); + + if (dp->pl) + phylink_mac_change(dp->pl, up); +} +EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_change); + +static void dsa_user_phylink_fixed_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct dsa_port *dp = container_of(config, struct dsa_port, pl_config); + struct dsa_switch *ds = dp->ds; + + /* No need to check that this operation is valid, the callback would + * not be called if it was not. + */ + ds->ops->phylink_fixed_state(ds, dp->index, state); +} + +/* user device setup *******************************************************/ +static int dsa_user_phy_connect(struct net_device *user_dev, int addr, + u32 flags) +{ + struct dsa_port *dp = dsa_user_to_port(user_dev); + struct dsa_switch *ds = dp->ds; + + user_dev->phydev = mdiobus_get_phy(ds->user_mii_bus, addr); + if (!user_dev->phydev) { + netdev_err(user_dev, "no phy at %d\n", addr); + return -ENODEV; + } + + user_dev->phydev->dev_flags |= flags; + + return phylink_connect_phy(dp->pl, user_dev->phydev); +} + +static int dsa_user_phy_setup(struct net_device *user_dev) +{ + struct dsa_port *dp = dsa_user_to_port(user_dev); + struct device_node *port_dn = dp->dn; + struct dsa_switch *ds = dp->ds; + u32 phy_flags = 0; + int ret; + + dp->pl_config.dev = &user_dev->dev; + dp->pl_config.type = PHYLINK_NETDEV; + + /* The get_fixed_state callback takes precedence over polling the + * link GPIO in PHYLINK (see phylink_get_fixed_state). Only set + * this if the switch provides such a callback. 
+ */ + if (ds->ops->phylink_fixed_state) { + dp->pl_config.get_fixed_state = dsa_user_phylink_fixed_state; + dp->pl_config.poll_fixed_state = true; + } + + ret = dsa_port_phylink_create(dp); + if (ret) + return ret; + + if (ds->ops->get_phy_flags) + phy_flags = ds->ops->get_phy_flags(ds, dp->index); + + ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags); + if (ret == -ENODEV && ds->user_mii_bus) { + /* We could not connect to a designated PHY or SFP, so try to + * use the switch internal MDIO bus instead + */ + ret = dsa_user_phy_connect(user_dev, dp->index, phy_flags); + } + if (ret) { + netdev_err(user_dev, "failed to connect to PHY: %pe\n", + ERR_PTR(ret)); + dsa_port_phylink_destroy(dp); + } + + return ret; +} + +void dsa_user_setup_tagger(struct net_device *user) +{ + struct dsa_port *dp = dsa_user_to_port(user); + struct net_device *conduit = dsa_port_to_conduit(dp); + struct dsa_user_priv *p = netdev_priv(user); + const struct dsa_port *cpu_dp = dp->cpu_dp; + const struct dsa_switch *ds = dp->ds; + + user->needed_headroom = cpu_dp->tag_ops->needed_headroom; + user->needed_tailroom = cpu_dp->tag_ops->needed_tailroom; + /* Try to save one extra realloc later in the TX path (in the conduit) + * by also inheriting the conduit's needed headroom and tailroom. + * The 8021q driver also does this. + */ + user->needed_headroom += conduit->needed_headroom; + user->needed_tailroom += conduit->needed_tailroom; + + p->xmit = cpu_dp->tag_ops->xmit; + + user->features = conduit->vlan_features | NETIF_F_HW_TC; + user->hw_features |= NETIF_F_HW_TC; + user->features |= NETIF_F_LLTX; + if (user->needed_tailroom) + user->features &= ~(NETIF_F_SG | NETIF_F_FRAGLIST); + if (ds->needs_standalone_vlan_filtering) + user->features |= NETIF_F_HW_VLAN_CTAG_FILTER; +} + +int dsa_user_suspend(struct net_device *user_dev) +{ + struct dsa_port *dp = dsa_user_to_port(user_dev); + + if (!netif_running(user_dev)) + return 0; + + netif_device_detach(user_dev); + + rtnl_lock(); + phylink_stop(dp->pl); + rtnl_unlock(); + + return 0; +} + +int dsa_user_resume(struct net_device *user_dev) +{ + struct dsa_port *dp = dsa_user_to_port(user_dev); + + if (!netif_running(user_dev)) + return 0; + + netif_device_attach(user_dev); + + rtnl_lock(); + phylink_start(dp->pl); + rtnl_unlock(); + + return 0; +} + +int dsa_user_create(struct dsa_port *port) +{ + struct net_device *conduit = dsa_port_to_conduit(port); + struct dsa_switch *ds = port->ds; + struct net_device *user_dev; + struct dsa_user_priv *p; + const char *name; + int assign_type; + int ret; + + if (!ds->num_tx_queues) + ds->num_tx_queues = 1; + + if (port->name) { + name = port->name; + assign_type = NET_NAME_PREDICTABLE; + } else { + name = "eth%d"; + assign_type = NET_NAME_ENUM; + } + + user_dev = alloc_netdev_mqs(sizeof(struct dsa_user_priv), name, + assign_type, ether_setup, + ds->num_tx_queues, 1); + if (user_dev == NULL) + return -ENOMEM; + + user_dev->rtnl_link_ops = &dsa_link_ops; + user_dev->ethtool_ops = &dsa_user_ethtool_ops; +#if IS_ENABLED(CONFIG_DCB) + user_dev->dcbnl_ops = &dsa_user_dcbnl_ops; +#endif + if (!is_zero_ether_addr(port->mac)) + eth_hw_addr_set(user_dev, port->mac); + else + eth_hw_addr_inherit(user_dev, conduit); + user_dev->priv_flags |= IFF_NO_QUEUE; + if (dsa_switch_supports_uc_filtering(ds)) + user_dev->priv_flags |= IFF_UNICAST_FLT; + user_dev->netdev_ops = &dsa_user_netdev_ops; + if (ds->ops->port_max_mtu) + user_dev->max_mtu = ds->ops->port_max_mtu(ds, port->index); + SET_NETDEV_DEVTYPE(user_dev, &dsa_type); + + 
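+ /* Parent the user netdev to the switch device, expose its devlink + * port and OF node, so userspace can correlate the interface with + * the hardware switch port. + */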
SET_NETDEV_DEV(user_dev, port->ds->dev); + SET_NETDEV_DEVLINK_PORT(user_dev, &port->devlink_port); + user_dev->dev.of_node = port->dn; + user_dev->vlan_features = conduit->vlan_features; + + p = netdev_priv(user_dev); + user_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!user_dev->tstats) { + free_netdev(user_dev); + return -ENOMEM; + } + + ret = gro_cells_init(&p->gcells, user_dev); + if (ret) + goto out_free; + + p->dp = port; + INIT_LIST_HEAD(&p->mall_tc_list); + port->user = user_dev; + dsa_user_setup_tagger(user_dev); + + netif_carrier_off(user_dev); + + ret = dsa_user_phy_setup(user_dev); + if (ret) { + netdev_err(user_dev, + "error %d setting up PHY for tree %d, switch %d, port %d\n", + ret, ds->dst->index, ds->index, port->index); + goto out_gcells; + } + + rtnl_lock(); + + ret = dsa_user_change_mtu(user_dev, ETH_DATA_LEN); + if (ret && ret != -EOPNOTSUPP) + dev_warn(ds->dev, "nonfatal error %d setting MTU to %d on port %d\n", + ret, ETH_DATA_LEN, port->index); + + ret = register_netdevice(user_dev); + if (ret) { + netdev_err(conduit, "error %d registering interface %s\n", + ret, user_dev->name); + rtnl_unlock(); + goto out_phy; + } + + if (IS_ENABLED(CONFIG_DCB)) { + ret = dsa_user_dcbnl_init(user_dev); + if (ret) { + netdev_err(user_dev, + "failed to initialize DCB: %pe\n", + ERR_PTR(ret)); + rtnl_unlock(); + goto out_unregister; + } + } + + ret = netdev_upper_dev_link(conduit, user_dev, NULL); + + rtnl_unlock(); + + if (ret) + goto out_unregister; + + return 0; + +out_unregister: + unregister_netdev(user_dev); +out_phy: + rtnl_lock(); + phylink_disconnect_phy(p->dp->pl); + rtnl_unlock(); + dsa_port_phylink_destroy(p->dp); +out_gcells: + gro_cells_destroy(&p->gcells); +out_free: + free_percpu(user_dev->tstats); + free_netdev(user_dev); + port->user = NULL; + return ret; +} + +void dsa_user_destroy(struct net_device *user_dev) +{ + struct net_device *conduit = dsa_user_to_conduit(user_dev); + struct dsa_port *dp = dsa_user_to_port(user_dev); + struct dsa_user_priv *p = netdev_priv(user_dev); + + netif_carrier_off(user_dev); + rtnl_lock(); + netdev_upper_dev_unlink(conduit, user_dev); + unregister_netdevice(user_dev); + phylink_disconnect_phy(dp->pl); + rtnl_unlock(); + + dsa_port_phylink_destroy(dp); + gro_cells_destroy(&p->gcells); + free_percpu(user_dev->tstats); + free_netdev(user_dev); +} + +int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit, + struct netlink_ext_ack *extack) +{ + struct net_device *old_conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + struct net_device *upper; + struct list_head *iter; + int err; + + if (conduit == old_conduit) + return 0; + + if (!ds->ops->port_change_conduit) { + NL_SET_ERR_MSG_MOD(extack, + "Driver does not support changing DSA conduit"); + return -EOPNOTSUPP; + } + + if (!netdev_uses_dsa(conduit)) { + NL_SET_ERR_MSG_MOD(extack, + "Interface not eligible as DSA conduit"); + return -EOPNOTSUPP; + } + + netdev_for_each_upper_dev_rcu(conduit, upper, iter) { + if (dsa_user_dev_check(upper)) + continue; + if (netif_is_bridge_master(upper)) + continue; + NL_SET_ERR_MSG_MOD(extack, "Cannot join conduit with unknown uppers"); + return -EOPNOTSUPP; + } + + /* Since we allow live-changing the DSA conduit, plus we auto-open the + * DSA conduit when the user port opens => we need to ensure that the + * new DSA conduit is open too. 
+ */ + if (dev->flags & IFF_UP) { + err = dev_open(conduit, extack); + if (err) + return err; + } + + netdev_upper_dev_unlink(old_conduit, dev); + + err = netdev_upper_dev_link(conduit, dev, extack); + if (err) + goto out_revert_old_conduit_unlink; + + err = dsa_port_change_conduit(dp, conduit, extack); + if (err) + goto out_revert_conduit_link; + + /* Update the MTU of the new CPU port through cross-chip notifiers */ + err = dsa_user_change_mtu(dev, dev->mtu); + if (err && err != -EOPNOTSUPP) { + netdev_warn(dev, + "nonfatal error updating MTU with new conduit: %pe\n", + ERR_PTR(err)); + } + + /* If the port doesn't have its own MAC address and relies on the DSA + * conduit's one, inherit it again from the new DSA conduit. + */ + if (is_zero_ether_addr(dp->mac)) + eth_hw_addr_inherit(dev, conduit); + + return 0; + +out_revert_conduit_link: + netdev_upper_dev_unlink(conduit, dev); +out_revert_old_conduit_unlink: + netdev_upper_dev_link(old_conduit, dev, NULL); + return err; +} + +bool dsa_user_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &dsa_user_netdev_ops; +} +EXPORT_SYMBOL_GPL(dsa_user_dev_check); + +static int dsa_user_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct netlink_ext_ack *extack; + int err = NOTIFY_DONE; + + if (!dsa_user_dev_check(dev)) + return err; + + extack = netdev_notifier_info_to_extack(&info->info); + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) { + err = dsa_port_bridge_join(dp, info->upper_dev, extack); + if (!err) + dsa_bridge_mtu_normalization(dp); + if (err == -EOPNOTSUPP) { + NL_SET_ERR_MSG_WEAK_MOD(extack, + "Offloading not supported"); + err = 0; + } + err = notifier_from_errno(err); + } else { + dsa_port_bridge_leave(dp, info->upper_dev); + err = NOTIFY_OK; + } + } else if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) { + err = dsa_port_lag_join(dp, info->upper_dev, + info->upper_info, extack); + if (err == -EOPNOTSUPP) { + NL_SET_ERR_MSG_WEAK_MOD(extack, + "Offloading not supported"); + err = 0; + } + err = notifier_from_errno(err); + } else { + dsa_port_lag_leave(dp, info->upper_dev); + err = NOTIFY_OK; + } + } else if (is_hsr_master(info->upper_dev)) { + if (info->linking) { + err = dsa_port_hsr_join(dp, info->upper_dev, extack); + if (err == -EOPNOTSUPP) { + NL_SET_ERR_MSG_WEAK_MOD(extack, + "Offloading not supported"); + err = 0; + } + err = notifier_from_errno(err); + } else { + dsa_port_hsr_leave(dp, info->upper_dev); + err = NOTIFY_OK; + } + } + + return err; +} + +static int dsa_user_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + + if (!dsa_user_dev_check(dev)) + return NOTIFY_DONE; + + if (netif_is_bridge_master(info->upper_dev) && !info->linking) + dsa_port_pre_bridge_leave(dp, info->upper_dev); + else if (netif_is_lag_master(info->upper_dev) && !info->linking) + dsa_port_pre_lag_leave(dp, info->upper_dev); + /* dsa_port_pre_hsr_leave is not yet necessary since hsr devices cannot + * be meaningfully placed under a bridge yet + */ + + return NOTIFY_DONE; +} + +static int +dsa_user_lag_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *lower; + struct list_head *iter; + int err = NOTIFY_DONE; + struct dsa_port *dp; + + if (!netif_is_lag_master(dev)) + return err; + + netdev_for_each_lower_dev(dev, lower, iter) { + if
(!dsa_user_dev_check(lower)) + continue; + + dp = dsa_user_to_port(lower); + if (!dp->lag) + /* Software LAG */ + continue; + + err = dsa_user_changeupper(lower, info); + if (notifier_to_errno(err)) + break; + } + + return err; +} + +/* Same as dsa_user_lag_changeupper() except that it calls + * dsa_user_prechangeupper() + */ +static int +dsa_user_lag_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *lower; + struct list_head *iter; + int err = NOTIFY_DONE; + struct dsa_port *dp; + + if (!netif_is_lag_master(dev)) + return err; + + netdev_for_each_lower_dev(dev, lower, iter) { + if (!dsa_user_dev_check(lower)) + continue; + + dp = dsa_user_to_port(lower); + if (!dp->lag) + /* Software LAG */ + continue; + + err = dsa_user_prechangeupper(lower, info); + if (notifier_to_errno(err)) + break; + } + + return err; +} + +static int +dsa_prevent_bridging_8021q_upper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct netlink_ext_ack *ext_ack; + struct net_device *user, *br; + struct dsa_port *dp; + + ext_ack = netdev_notifier_info_to_extack(&info->info); + + if (!is_vlan_dev(dev)) + return NOTIFY_DONE; + + user = vlan_dev_real_dev(dev); + if (!dsa_user_dev_check(user)) + return NOTIFY_DONE; + + dp = dsa_user_to_port(user); + br = dsa_port_bridge_dev_get(dp); + if (!br) + return NOTIFY_DONE; + + /* Deny enslaving a VLAN device into a VLAN-aware bridge */ + if (br_vlan_enabled(br) && + netif_is_bridge_master(info->upper_dev) && info->linking) { + NL_SET_ERR_MSG_MOD(ext_ack, + "Cannot make VLAN device join VLAN-aware bridge"); + return notifier_from_errno(-EINVAL); + } + + return NOTIFY_DONE; +} + +static int +dsa_user_check_8021q_upper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + struct net_device *br = dsa_port_bridge_dev_get(dp); + struct bridge_vlan_info br_info; + struct netlink_ext_ack *extack; + int err = NOTIFY_DONE; + u16 vid; + + if (!br || !br_vlan_enabled(br)) + return NOTIFY_DONE; + + extack = netdev_notifier_info_to_extack(&info->info); + vid = vlan_dev_vlan_id(info->upper_dev); + + /* br_vlan_get_info() returns -EINVAL or -ENOENT if the + * device, respectively the VID, is not found. A return value of + * 0 means success, which for our purposes here is a failure. + */ + err = br_vlan_get_info(br, vid, &br_info); + if (err == 0) { + NL_SET_ERR_MSG_MOD(extack, + "This VLAN is already configured by the bridge"); + return notifier_from_errno(-EBUSY); + } + + return NOTIFY_DONE; +} + +static int +dsa_user_prechangeupper_sanity_check(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct dsa_switch *ds; + struct dsa_port *dp; + int err; + + if (!dsa_user_dev_check(dev)) + return dsa_prevent_bridging_8021q_upper(dev, info); + + dp = dsa_user_to_port(dev); + ds = dp->ds; + + if (ds->ops->port_prechangeupper) { + err = ds->ops->port_prechangeupper(ds, dp->index, info); + if (err) + return notifier_from_errno(err); + } + + if (is_vlan_dev(info->upper_dev)) + return dsa_user_check_8021q_upper(dev, info); + + return NOTIFY_DONE; +} + +/* To be eligible as a DSA conduit, a LAG must have all lower interfaces be + * eligible DSA conduits. Additionally, all LAG members must be DSA conduits of + * switches in the same switch tree.
+ */ +static int dsa_lag_conduit_validate(struct net_device *lag_dev, + struct netlink_ext_ack *extack) +{ + struct net_device *lower1, *lower2; + struct list_head *iter1, *iter2; + + netdev_for_each_lower_dev(lag_dev, lower1, iter1) { + netdev_for_each_lower_dev(lag_dev, lower2, iter2) { + if (!netdev_uses_dsa(lower1) || + !netdev_uses_dsa(lower2)) { + NL_SET_ERR_MSG_MOD(extack, + "All LAG ports must be eligible as DSA conduits"); + return notifier_from_errno(-EINVAL); + } + + if (lower1 == lower2) + continue; + + if (!dsa_port_tree_same(lower1->dsa_ptr, + lower2->dsa_ptr)) { + NL_SET_ERR_MSG_MOD(extack, + "LAG contains DSA conduits of disjoint switch trees"); + return notifier_from_errno(-EINVAL); + } + } + } + + return NOTIFY_DONE; +} + +static int +dsa_conduit_prechangeupper_sanity_check(struct net_device *conduit, + struct netdev_notifier_changeupper_info *info) +{ + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info); + + if (!netdev_uses_dsa(conduit)) + return NOTIFY_DONE; + + if (!info->linking) + return NOTIFY_DONE; + + /* Allow DSA switch uppers */ + if (dsa_user_dev_check(info->upper_dev)) + return NOTIFY_DONE; + + /* Allow bridge uppers of DSA conduits, subject to further + * restrictions in dsa_bridge_prechangelower_sanity_check() + */ + if (netif_is_bridge_master(info->upper_dev)) + return NOTIFY_DONE; + + /* Allow LAG uppers, subject to further restrictions in + * dsa_lag_conduit_prechangelower_sanity_check() + */ + if (netif_is_lag_master(info->upper_dev)) + return dsa_lag_conduit_validate(info->upper_dev, extack); + + NL_SET_ERR_MSG_MOD(extack, + "DSA conduit cannot join unknown upper interfaces"); + return notifier_from_errno(-EBUSY); +} + +static int +dsa_lag_conduit_prechangelower_sanity_check(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(&info->info); + struct net_device *lag_dev = info->upper_dev; + struct net_device *lower; + struct list_head *iter; + + if (!netdev_uses_dsa(lag_dev) || !netif_is_lag_master(lag_dev)) + return NOTIFY_DONE; + + if (!info->linking) + return NOTIFY_DONE; + + if (!netdev_uses_dsa(dev)) { + NL_SET_ERR_MSG(extack, + "Only DSA conduits can join a LAG DSA conduit"); + return notifier_from_errno(-EINVAL); + } + + netdev_for_each_lower_dev(lag_dev, lower, iter) { + if (!dsa_port_tree_same(dev->dsa_ptr, lower->dsa_ptr)) { + NL_SET_ERR_MSG(extack, + "Interface is DSA conduit for a different switch tree than this LAG"); + return notifier_from_errno(-EINVAL); + } + + break; + } + + return NOTIFY_DONE; +} + +/* Don't allow bridging of DSA conduits, since the bridge layer rx_handler + * prevents the DSA fake ethertype handler from being invoked, so we don't get + * the chance to strip off and parse the DSA switch tag protocol header (the + * bridge layer just returns RX_HANDLER_CONSUMED, stopping RX processing for + * these frames). + * The only case where that would not be an issue is when bridging can already + * be offloaded, such as when the DSA conduit is itself a DSA or plain switchdev + * port, and is bridged only with other ports from the same hardware device.
+ */ +static int +dsa_bridge_prechangelower_sanity_check(struct net_device *new_lower, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *br = info->upper_dev; + struct netlink_ext_ack *extack; + struct net_device *lower; + struct list_head *iter; + + if (!netif_is_bridge_master(br)) + return NOTIFY_DONE; + + if (!info->linking) + return NOTIFY_DONE; + + extack = netdev_notifier_info_to_extack(&info->info); + + netdev_for_each_lower_dev(br, lower, iter) { + if (!netdev_uses_dsa(new_lower) && !netdev_uses_dsa(lower)) + continue; + + if (!netdev_port_same_parent_id(lower, new_lower)) { + NL_SET_ERR_MSG(extack, + "Cannot do software bridging with a DSA conduit"); + return notifier_from_errno(-EINVAL); + } + } + + return NOTIFY_DONE; +} + +static void dsa_tree_migrate_ports_from_lag_conduit(struct dsa_switch_tree *dst, + struct net_device *lag_dev) +{ + struct net_device *new_conduit = dsa_tree_find_first_conduit(dst); + struct dsa_port *dp; + int err; + + dsa_tree_for_each_user_port(dp, dst) { + if (dsa_port_to_conduit(dp) != lag_dev) + continue; + + err = dsa_user_change_conduit(dp->user, new_conduit, NULL); + if (err) { + netdev_err(dp->user, + "failed to restore conduit to %s: %pe\n", + new_conduit->name, ERR_PTR(err)); + } + } +} + +static int dsa_conduit_lag_join(struct net_device *conduit, + struct net_device *lag_dev, + struct netdev_lag_upper_info *uinfo, + struct netlink_ext_ack *extack) +{ + struct dsa_port *cpu_dp = conduit->dsa_ptr; + struct dsa_switch_tree *dst = cpu_dp->dst; + struct dsa_port *dp; + int err; + + err = dsa_conduit_lag_setup(lag_dev, cpu_dp, uinfo, extack); + if (err) + return err; + + dsa_tree_for_each_user_port(dp, dst) { + if (dsa_port_to_conduit(dp) != conduit) + continue; + + err = dsa_user_change_conduit(dp->user, lag_dev, extack); + if (err) + goto restore; + } + + return 0; + +restore: + dsa_tree_for_each_user_port_continue_reverse(dp, dst) { + if (dsa_port_to_conduit(dp) != lag_dev) + continue; + + err = dsa_user_change_conduit(dp->user, conduit, NULL); + if (err) { + netdev_err(dp->user, + "failed to restore conduit to %s: %pe\n", + conduit->name, ERR_PTR(err)); + } + } + + dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr); + + return err; +} + +static void dsa_conduit_lag_leave(struct net_device *conduit, + struct net_device *lag_dev) +{ + struct dsa_port *dp, *cpu_dp = lag_dev->dsa_ptr; + struct dsa_switch_tree *dst = cpu_dp->dst; + struct dsa_port *new_cpu_dp = NULL; + struct net_device *lower; + struct list_head *iter; + + netdev_for_each_lower_dev(lag_dev, lower, iter) { + if (netdev_uses_dsa(lower)) { + new_cpu_dp = lower->dsa_ptr; + break; + } + } + + if (new_cpu_dp) { + /* Update the CPU port of the user ports still under the LAG + * so that dsa_port_to_conduit() continues to work properly + */ + dsa_tree_for_each_user_port(dp, dst) + if (dsa_port_to_conduit(dp) == lag_dev) + dp->cpu_dp = new_cpu_dp; + + /* Update the index of the virtual CPU port to match the lowest + * physical CPU port + */ + lag_dev->dsa_ptr = new_cpu_dp; + wmb(); + } else { + /* If the LAG DSA conduit has no ports left, migrate back all + * user ports to the first physical CPU port + */ + dsa_tree_migrate_ports_from_lag_conduit(dst, lag_dev); + } + + /* This DSA conduit has left its LAG in any case, so let + * the CPU port leave the hardware LAG as well + */ + dsa_conduit_lag_teardown(lag_dev, conduit->dsa_ptr); +} + +static int dsa_conduit_changeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct netlink_ext_ack 
*extack; + int err = NOTIFY_DONE; + + if (!netdev_uses_dsa(dev)) + return err; + + extack = netdev_notifier_info_to_extack(&info->info); + + if (netif_is_lag_master(info->upper_dev)) { + if (info->linking) { + err = dsa_conduit_lag_join(dev, info->upper_dev, + info->upper_info, extack); + err = notifier_from_errno(err); + } else { + dsa_conduit_lag_leave(dev, info->upper_dev); + err = NOTIFY_OK; + } + } + + return err; +} + +static int dsa_user_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + + switch (event) { + case NETDEV_PRECHANGEUPPER: { + struct netdev_notifier_changeupper_info *info = ptr; + int err; + + err = dsa_user_prechangeupper_sanity_check(dev, info); + if (notifier_to_errno(err)) + return err; + + err = dsa_conduit_prechangeupper_sanity_check(dev, info); + if (notifier_to_errno(err)) + return err; + + err = dsa_lag_conduit_prechangelower_sanity_check(dev, info); + if (notifier_to_errno(err)) + return err; + + err = dsa_bridge_prechangelower_sanity_check(dev, info); + if (notifier_to_errno(err)) + return err; + + err = dsa_user_prechangeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; + + err = dsa_user_lag_prechangeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; + + break; + } + case NETDEV_CHANGEUPPER: { + int err; + + err = dsa_user_changeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; + + err = dsa_user_lag_changeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; + + err = dsa_conduit_changeupper(dev, ptr); + if (notifier_to_errno(err)) + return err; + + break; + } + case NETDEV_CHANGELOWERSTATE: { + struct netdev_notifier_changelowerstate_info *info = ptr; + struct dsa_port *dp; + int err = 0; + + if (dsa_user_dev_check(dev)) { + dp = dsa_user_to_port(dev); + + err = dsa_port_lag_change(dp, info->lower_state_info); + } + + /* Mirror LAG port events on DSA conduits that are in + * a LAG towards their respective switch CPU ports + */ + if (netdev_uses_dsa(dev)) { + dp = dev->dsa_ptr; + + err = dsa_port_lag_change(dp, info->lower_state_info); + } + + return notifier_from_errno(err); + } + case NETDEV_CHANGE: + case NETDEV_UP: { + /* Track state of conduit port. + * DSA driver may require the conduit port (and indirectly + * the tagger) to be available for some special operation. + */ + if (netdev_uses_dsa(dev)) { + struct dsa_port *cpu_dp = dev->dsa_ptr; + struct dsa_switch_tree *dst = cpu_dp->ds->dst; + + /* Track when the conduit port is UP */ + dsa_tree_conduit_oper_state_change(dst, dev, + netif_oper_up(dev)); + + /* Track when the conduit port is ready and can accept + * packets. + * NETDEV_UP event is not enough to flag a port as ready. + * We also have to wait for linkwatch_do_dev to dev_activate + * and emit a NETDEV_CHANGE event. + * We check if a conduit port is ready by checking if the dev + * has a qdisc assigned and is not noop.
+ */ + dsa_tree_conduit_admin_state_change(dst, dev, + !qdisc_tx_is_noop(dev)); + + return NOTIFY_OK; + } + + return NOTIFY_DONE; + } + case NETDEV_GOING_DOWN: { + struct dsa_port *dp, *cpu_dp; + struct dsa_switch_tree *dst; + LIST_HEAD(close_list); + + if (!netdev_uses_dsa(dev)) + return NOTIFY_DONE; + + cpu_dp = dev->dsa_ptr; + dst = cpu_dp->ds->dst; + + dsa_tree_conduit_admin_state_change(dst, dev, false); + + list_for_each_entry(dp, &dst->ports, list) { + if (!dsa_port_is_user(dp)) + continue; + + if (dp->cpu_dp != cpu_dp) + continue; + + list_add(&dp->user->close_list, &close_list); + } + + dev_close_many(&close_list, true); + + return NOTIFY_OK; + } + default: + break; + } + + return NOTIFY_DONE; +} + +static void +dsa_fdb_offload_notify(struct dsa_switchdev_event_work *switchdev_work) +{ + struct switchdev_notifier_fdb_info info = {}; + + info.addr = switchdev_work->addr; + info.vid = switchdev_work->vid; + info.offloaded = true; + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, + switchdev_work->orig_dev, &info.info, NULL); +} + +static void dsa_user_switchdev_event_work(struct work_struct *work) +{ + struct dsa_switchdev_event_work *switchdev_work = + container_of(work, struct dsa_switchdev_event_work, work); + const unsigned char *addr = switchdev_work->addr; + struct net_device *dev = switchdev_work->dev; + u16 vid = switchdev_work->vid; + struct dsa_switch *ds; + struct dsa_port *dp; + int err; + + dp = dsa_user_to_port(dev); + ds = dp->ds; + + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + if (switchdev_work->host_addr) + err = dsa_port_bridge_host_fdb_add(dp, addr, vid); + else if (dp->lag) + err = dsa_port_lag_fdb_add(dp, addr, vid); + else + err = dsa_port_fdb_add(dp, addr, vid); + if (err) { + dev_err(ds->dev, + "port %d failed to add %pM vid %d to fdb: %d\n", + dp->index, addr, vid, err); + break; + } + dsa_fdb_offload_notify(switchdev_work); + break; + + case SWITCHDEV_FDB_DEL_TO_DEVICE: + if (switchdev_work->host_addr) + err = dsa_port_bridge_host_fdb_del(dp, addr, vid); + else if (dp->lag) + err = dsa_port_lag_fdb_del(dp, addr, vid); + else + err = dsa_port_fdb_del(dp, addr, vid); + if (err) { + dev_err(ds->dev, + "port %d failed to delete %pM vid %d from fdb: %d\n", + dp->index, addr, vid, err); + } + + break; + } + + kfree(switchdev_work); +} + +static bool dsa_foreign_dev_check(const struct net_device *dev, + const struct net_device *foreign_dev) +{ + const struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch_tree *dst = dp->ds->dst; + + if (netif_is_bridge_master(foreign_dev)) + return !dsa_tree_offloads_bridge_dev(dst, foreign_dev); + + if (netif_is_bridge_port(foreign_dev)) + return !dsa_tree_offloads_bridge_port(dst, foreign_dev); + + /* Everything else is foreign */ + return true; +} + +static int dsa_user_fdb_event(struct net_device *dev, + struct net_device *orig_dev, + unsigned long event, const void *ctx, + const struct switchdev_notifier_fdb_info *fdb_info) +{ + struct dsa_switchdev_event_work *switchdev_work; + struct dsa_port *dp = dsa_user_to_port(dev); + bool host_addr = fdb_info->is_local; + struct dsa_switch *ds = dp->ds; + + if (ctx && ctx != dp) + return 0; + + if (!dp->bridge) + return 0; + + if (switchdev_fdb_is_dynamically_learned(fdb_info)) { + if (dsa_port_offloads_bridge_port(dp, orig_dev)) + return 0; + + /* FDB entries learned by the software bridge or by foreign + * bridge ports should be installed as host addresses only if + * the driver requests assisted learning. 
+ */ + if (!ds->assisted_learning_on_cpu_port) + return 0; + } + + /* Also treat FDB entries on foreign interfaces bridged with us as host + * addresses. + */ + if (dsa_foreign_dev_check(dev, orig_dev)) + host_addr = true; + + /* Check early that we're not doing work in vain. + * Host addresses on LAG ports still require regular FDB ops, + * since the CPU port isn't in a LAG. + */ + if (dp->lag && !host_addr) { + if (!ds->ops->lag_fdb_add || !ds->ops->lag_fdb_del) + return -EOPNOTSUPP; + } else { + if (!ds->ops->port_fdb_add || !ds->ops->port_fdb_del) + return -EOPNOTSUPP; + } + + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (!switchdev_work) + return -ENOMEM; + + netdev_dbg(dev, "%s FDB entry towards %s, addr %pM vid %d%s\n", + event == SWITCHDEV_FDB_ADD_TO_DEVICE ? "Adding" : "Deleting", + orig_dev->name, fdb_info->addr, fdb_info->vid, + host_addr ? " as host address" : ""); + + INIT_WORK(&switchdev_work->work, dsa_user_switchdev_event_work); + switchdev_work->event = event; + switchdev_work->dev = dev; + switchdev_work->orig_dev = orig_dev; + + ether_addr_copy(switchdev_work->addr, fdb_info->addr); + switchdev_work->vid = fdb_info->vid; + switchdev_work->host_addr = host_addr; + + dsa_schedule_work(&switchdev_work->work); + + return 0; +} + +/* Called under rcu_read_lock() */ +static int dsa_user_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + dsa_user_dev_check, + dsa_user_port_attr_set); + return notifier_from_errno(err); + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + err = switchdev_handle_fdb_event_to_device(dev, event, ptr, + dsa_user_dev_check, + dsa_foreign_dev_check, + dsa_user_fdb_event); + return notifier_from_errno(err); + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; +} + +static int dsa_user_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add_foreign(dev, ptr, + dsa_user_dev_check, + dsa_foreign_dev_check, + dsa_user_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del_foreign(dev, ptr, + dsa_user_dev_check, + dsa_foreign_dev_check, + dsa_user_port_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + dsa_user_dev_check, + dsa_user_port_attr_set); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static struct notifier_block dsa_user_nb __read_mostly = { + .notifier_call = dsa_user_netdevice_event, +}; + +struct notifier_block dsa_user_switchdev_notifier = { + .notifier_call = dsa_user_switchdev_event, +}; + +struct notifier_block dsa_user_switchdev_blocking_notifier = { + .notifier_call = dsa_user_switchdev_blocking_event, +}; + +int dsa_user_register_notifier(void) +{ + struct notifier_block *nb; + int err; + + err = register_netdevice_notifier(&dsa_user_nb); + if (err) + return err; + + err = register_switchdev_notifier(&dsa_user_switchdev_notifier); + if (err) + goto err_switchdev_nb; + + nb = &dsa_user_switchdev_blocking_notifier; + err = register_switchdev_blocking_notifier(nb); + if (err) + goto err_switchdev_blocking_nb; + + return 0; + 
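+ /* Error path: unwind in the reverse order of registration. */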
+err_switchdev_blocking_nb: + unregister_switchdev_notifier(&dsa_user_switchdev_notifier); +err_switchdev_nb: + unregister_netdevice_notifier(&dsa_user_nb); + return err; +} + +void dsa_user_unregister_notifier(void) +{ + struct notifier_block *nb; + int err; + + nb = &dsa_user_switchdev_blocking_notifier; + err = unregister_switchdev_blocking_notifier(nb); + if (err) + pr_err("DSA: failed to unregister switchdev blocking notifier (%d)\n", err); + + err = unregister_switchdev_notifier(&dsa_user_switchdev_notifier); + if (err) + pr_err("DSA: failed to unregister switchdev notifier (%d)\n", err); + + err = unregister_netdevice_notifier(&dsa_user_nb); + if (err) + pr_err("DSA: failed to unregister user notifier (%d)\n", err); +} diff --git a/net/dsa/user.h b/net/dsa/user.h new file mode 100644 index 000000000000..996069130bea --- /dev/null +++ b/net/dsa/user.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef __DSA_USER_H +#define __DSA_USER_H + +#include +#include +#include +#include +#include +#include +#include + +struct net_device; +struct netlink_ext_ack; + +extern struct notifier_block dsa_user_switchdev_notifier; +extern struct notifier_block dsa_user_switchdev_blocking_notifier; + +struct dsa_user_priv { + /* Copy of CPU port xmit for faster access in user transmit hot path */ + struct sk_buff * (*xmit)(struct sk_buff *skb, + struct net_device *dev); + + struct gro_cells gcells; + + /* DSA port data, such as switch, port index, etc. */ + struct dsa_port *dp; + +#ifdef CONFIG_NET_POLL_CONTROLLER + struct netpoll *netpoll; +#endif + + /* TC context */ + struct list_head mall_tc_list; +}; + +void dsa_user_mii_bus_init(struct dsa_switch *ds); +int dsa_user_create(struct dsa_port *dp); +void dsa_user_destroy(struct net_device *user_dev); +int dsa_user_suspend(struct net_device *user_dev); +int dsa_user_resume(struct net_device *user_dev); +int dsa_user_register_notifier(void); +void dsa_user_unregister_notifier(void); +void dsa_user_sync_ha(struct net_device *dev); +void dsa_user_unsync_ha(struct net_device *dev); +void dsa_user_setup_tagger(struct net_device *user); +int dsa_user_change_mtu(struct net_device *dev, int new_mtu); +int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit, + struct netlink_ext_ack *extack); +int dsa_user_manage_vlan_filtering(struct net_device *dev, + bool vlan_filtering); + +static inline struct dsa_port *dsa_user_to_port(const struct net_device *dev) +{ + struct dsa_user_priv *p = netdev_priv(dev); + + return p->dp; +} + +static inline struct net_device * +dsa_user_to_conduit(const struct net_device *dev) +{ + struct dsa_port *dp = dsa_user_to_port(dev); + + return dsa_port_to_conduit(dp); +} + +#endif -- cgit v1.2.3
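As an illustration of the renamed helpers in action (not part of the commit itself), here is a condensed sketch of a tagger transmit hook built on the dsa_user_to_port() helper declared above. It follows the pattern of the in-tree net/dsa/tag_trailer.c; the function name and tag byte layout here are hypothetical:

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#include "user.h"	/* net/dsa-internal header introduced by this commit */

static struct sk_buff *example_trailer_xmit(struct sk_buff *skb,
					    struct net_device *dev)
{
	/* Map the user netdev back to its switch port */
	struct dsa_port *dp = dsa_user_to_port(dev);
	u8 *trailer;

	/* Append a 4-byte tag; dsa_user_setup_tagger() advertised the
	 * tagger's needed_tailroom, so no reallocation is expected.
	 */
	trailer = skb_put(skb, 4);
	trailer[0] = 0x80;		/* hypothetical tag marker */
	trailer[1] = 1 << dp->index;	/* destination port bitmap */
	trailer[2] = 0x10;
	trailer[3] = 0x00;

	return skb;
}

On receive, the tagger's rcv hook on the conduit performs the inverse lookup and steers the skb to the matching user netdev; that symmetry is the core of the conduit/user naming.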