 net/dsa/dsa_priv.h         | 14 ++++++++++++++
 net/dsa/tag_brcm.c         |  4 ++--
 net/dsa/tag_dsa.c          | 15 +++++++++++----
 net/dsa/tag_hellcreek.c    |  2 +-
 net/dsa/tag_ksz.c          |  2 +-
 net/dsa/tag_lan9303.c      |  3 ++-
 net/dsa/tag_mtk.c          |  2 +-
 net/dsa/tag_ocelot.c       |  2 +-
 net/dsa/tag_ocelot_8021q.c |  2 +-
 net/dsa/tag_rtl4_a.c       |  2 +-
 net/dsa/tag_sja1105.c      | 20 ++++++++++++++------
 net/dsa/tag_xrs700x.c      |  2 +-
 12 files changed, 50 insertions(+), 20 deletions(-)
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index da3ad02d6ceb..e43c5dc04282 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -440,6 +440,20 @@ dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
return NULL;
}
+/* If the ingress port offloads the bridge, we mark the frame as autonomously
+ * forwarded by hardware, so the software bridge doesn't forward it twice, back
+ * to us, because we already did. However, if we're in fallback mode and we do
+ * software bridging, we are not offloading it, therefore the dp->bridge_dev
+ * pointer is not populated, and flooding needs to be done by software (we are
+ * effectively operating in standalone ports mode).
+ */
+static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
+{
+ struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+
+ skb->offload_fwd_mark = !!(dp->bridge_dev);
+}
+
/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
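Why the fallback case must stay unmarked: the software bridge consults skb->offload_fwd_mark when deciding whether it may skip forwarding towards other ports in the same hardware forwarding domain. A condensed sketch of that consumer-side check is shown below (simplified for illustration; frame_hwdom() and port_hwdom() are placeholder helpers, not the actual bridge API):

static bool allowed_egress(const struct net_bridge_port *p,
			   const struct sk_buff *skb)
{
	/* A frame marked as already forwarded in hardware is only
	 * re-forwarded in software towards ports outside the ingress
	 * port's hardware domain. With the mark left clear (fallback,
	 * non-offloaded bridging), the bridge forwards and floods to
	 * every port as usual.
	 */
	return !skb->offload_fwd_mark || frame_hwdom(skb) != port_hwdom(p);
}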
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 0750af951fc9..a27f5096777a 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -167,7 +167,7 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
/* Remove Broadcom tag and update checksum */
skb_pull_rcsum(skb, BRCM_TAG_LEN);
- skb->offload_fwd_mark = 1;
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
@@ -271,7 +271,7 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
/* Remove Broadcom tag and update checksum */
skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN);
- skb->offload_fwd_mark = 1;
+ dsa_default_offload_fwd_mark(skb);
/* Move the Ethernet DA and SA */
memmove(skb->data - ETH_HLEN,
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 0f258218c8cf..3607499d0697 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -198,8 +198,8 @@ static struct sk_buff *dsa_xmit_ll(struct sk_buff *skb, struct net_device *dev,
static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
u8 extra)
{
+ bool trap = false, trunk = false;
int source_device, source_port;
- bool trunk = false;
enum dsa_code code;
enum dsa_cmd cmd;
u8 *dsa_header;
@@ -210,8 +210,6 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
cmd = dsa_header[0] >> 6;
switch (cmd) {
case DSA_CMD_FORWARD:
- skb->offload_fwd_mark = 1;
-
trunk = !!(dsa_header[1] & 7);
break;
@@ -230,7 +228,6 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
* device (like a bridge) that forwarding has
* already been done by hardware.
*/
- skb->offload_fwd_mark = 1;
break;
case DSA_CODE_MGMT_TRAP:
case DSA_CODE_IGMP_MLD_TRAP:
@@ -238,6 +235,7 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
/* Traps have, by definition, not been
* forwarded by hardware, so don't mark them.
*/
+ trap = true;
break;
default:
/* Reserved code, this could be anything. Drop
@@ -271,6 +269,15 @@ static struct sk_buff *dsa_rcv_ll(struct sk_buff *skb, struct net_device *dev,
if (!skb->dev)
return NULL;
+ /* When using LAG offload, skb->dev is not a DSA slave interface,
+ * so we cannot call dsa_default_offload_fwd_mark and we need to
+ * special-case it.
+ */
+ if (trunk)
+ skb->offload_fwd_mark = true;
+ else if (!trap)
+ dsa_default_offload_fwd_mark(skb);
+
/* If the 'tagged' bit is set; convert the DSA tag to a 802.1Q
* tag, and delete the ethertype (extra) if applicable. If the
* 'tagged' bit is cleared; delete the DSA tag, and ethertype
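Taken together, the tag_dsa.c hunks boil down to a three-way decision once skb->dev has been resolved; condensed here with the rationale spelled out (this restates the patch above, it is not additional code):

	if (trunk) {
		/* LAG offload: skb->dev is the bonding/team device rather
		 * than a DSA slave, so dsa_default_offload_fwd_mark() (which
		 * expects a slave) cannot be used. FORWARD frames from a
		 * trunk were forwarded in hardware, so mark unconditionally.
		 */
		skb->offload_fwd_mark = true;
	} else if (!trap) {
		/* Frames forwarded or mirrored by hardware: mark only if the
		 * ingress port actually offloads a bridge.
		 */
		dsa_default_offload_fwd_mark(skb);
	}
	/* Trapped frames (management, IGMP/MLD, policy traps) were not
	 * forwarded by hardware and keep offload_fwd_mark cleared.
	 */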
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
index 424130f85f59..c41208cbd936 100644
--- a/net/dsa/tag_hellcreek.c
+++ b/net/dsa/tag_hellcreek.c
@@ -44,7 +44,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
pskb_trim_rcsum(skb, skb->len - HELLCREEK_TAG_LEN);
- skb->offload_fwd_mark = true;
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index a201ccf2435d..1c2dfa80f9b0 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -24,7 +24,7 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb,
pskb_trim_rcsum(skb, skb->len - len);
- skb->offload_fwd_mark = true;
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
index 26207ef39ebc..cf7cf2fa1240 100644
--- a/net/dsa/tag_lan9303.c
+++ b/net/dsa/tag_lan9303.c
@@ -115,7 +115,8 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
skb_pull_rcsum(skb, 2 + 2);
memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN),
2 * ETH_ALEN);
- skb->offload_fwd_mark = !(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU);
+ if (!(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU))
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index cc3ba864ad5b..3fb80e43f3a5 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -92,7 +92,7 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
if (!skb->dev)
return NULL;
- skb->offload_fwd_mark = 1;
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index 190f4bfd3bef..3252634a29b8 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -104,7 +104,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
*/
return NULL;
- skb->offload_fwd_mark = 1;
+ dsa_default_offload_fwd_mark(skb);
skb->priority = qos_class;
/* Ocelot switches copy frames unmodified to the CPU. However, it is
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index d0781b058610..c95de71d13b0 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -49,7 +49,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
if (!skb->dev)
return NULL;
- skb->offload_fwd_mark = 1;
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
index 57c46b4ab2b3..f6b63aad6551 100644
--- a/net/dsa/tag_rtl4_a.c
+++ b/net/dsa/tag_rtl4_a.c
@@ -114,7 +114,7 @@ static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb,
skb->data - ETH_HLEN - RTL4_A_HDR_LEN,
2 * ETH_ALEN);
- skb->offload_fwd_mark = 1;
+ dsa_default_offload_fwd_mark(skb);
return skb;
}
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index c1f993d592ef..664cb802b71a 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -405,8 +405,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
- skb->offload_fwd_mark = 1;
-
if (sja1105_skb_has_tag_8021q(skb)) {
/* Normal traffic path. */
sja1105_vlan_rcv(skb, &source_port, &switch_id, &vid);
@@ -437,6 +435,9 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
return NULL;
}
+ if (!is_link_local)
+ dsa_default_offload_fwd_mark(skb);
+
return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
is_meta);
}
@@ -480,7 +481,8 @@ static struct sk_buff *sja1110_rcv_meta(struct sk_buff *skb, u16 rx_header)
static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
int *source_port,
- int *switch_id)
+ int *switch_id,
+ bool *host_only)
{
u16 rx_header;
@@ -494,6 +496,9 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb,
*/
rx_header = ntohs(*(__be16 *)skb->data);
+ if (rx_header & SJA1110_RX_HEADER_HOST_ONLY)
+ *host_only = true;
+
if (rx_header & SJA1110_RX_HEADER_IS_METADATA)
return sja1110_rcv_meta(skb, rx_header);
@@ -545,13 +550,13 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
struct packet_type *pt)
{
int source_port = -1, switch_id = -1;
+ bool host_only = false;
u16 vid;
- skb->offload_fwd_mark = 1;
-
if (sja1110_skb_has_inband_control_extension(skb)) {
skb = sja1110_rcv_inband_control_extension(skb, &source_port,
- &switch_id);
+ &switch_id,
+ &host_only);
if (!skb)
return NULL;
}
@@ -569,6 +574,9 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb,
return NULL;
}
+ if (!host_only)
+ dsa_default_offload_fwd_mark(skb);
+
return skb;
}
diff --git a/net/dsa/tag_xrs700x.c b/net/dsa/tag_xrs700x.c
index a31ff7fcb45f..da231c16ac82 100644
--- a/net/dsa/tag_xrs700x.c
+++ b/net/dsa/tag_xrs700x.c
@@ -46,7 +46,7 @@ static struct sk_buff *xrs700x_rcv(struct sk_buff *skb, struct net_device *dev,
return NULL;
/* Frame is forwarded by hardware, don't forward in software. */
- skb->offload_fwd_mark = 1;
+ dsa_default_offload_fwd_mark(skb);
return skb;
}