author	Jason Wang <jasowang@redhat.com>	2014-01-10 12:18:26 +0400
committer	David S. Miller <davem@davemloft.net>	2014-01-10 22:23:08 +0400
commit	f663dd9aaf9ed124f25f0f8452edf238f087ad50 (patch)
tree	2aee7dfcfd373c6905de5b2a9f810f9e209156b6 /net
parent	b13ba1b83f524732523db1079e56478b32c85c96 (diff)
net: core: explicitly select a txq before doing l2 forwarding
Currently, the tx queue is selected implicitly in ndo_dfwd_start_xmit(). This causes several issues:

- NETIF_F_LLTX was removed for macvlan, so the txq lock is taken for macvlan instead of the lower device, which misses the necessary txq synchronization for the lower device, such as txq stopping or freezing required by the dev watchdog or the control path.
- dev_hard_start_xmit() is called with a NULL txq, which bypasses the net device watchdog.
- dev_hard_start_xmit() does not check txq everywhere, which leads to a crash when tso is disabled for the lower device.

Fix this by explicitly introducing a new parameter to .ndo_select_queue() for selecting queues in the case of l2 forwarding offload. netdev_pick_tx() is also extended to accept this parameter, and dev_queue_xmit_accel() is used to do the l2 forwarding transmission.

With this fix, NETIF_F_LLTX can be preserved for macvlan, and there is no need to check txq against NULL in dev_hard_start_xmit(). There is also no need to keep a dedicated ndo_dfwd_start_xmit(); we can simply reuse the dev_queue_xmit() code to do the transmission.

In the future, this is also required for macvtap l2 forwarding support, since it provides the necessary synchronization method.

Cc: John Fastabend <john.r.fastabend@intel.com>
Cc: Neil Horman <nhorman@tuxdriver.com>
Cc: e1000-devel@lists.sourceforge.net
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
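To illustrate the resulting call flow, here is a minimal sketch. The example_* names and the example_fwd_queue() helper are hypothetical, and the actual macvlan wiring lives under drivers/net, outside this net/-only diff:

/* Lower device side: .ndo_select_queue() now takes accel_priv, which
 * identifies the l2 forwarding slave, so the driver can pick the txq
 * reserved for that slave.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb,
				void *accel_priv)
{
	if (accel_priv)
		/* Hypothetical helper mapping the forwarding context to
		 * the queue range dedicated to this offloaded slave.
		 */
		return example_fwd_queue(dev, accel_priv);
	return __netdev_pick_tx(dev, skb);
}

/* Upper device side (e.g. macvlan): rather than calling a private
 * ndo_dfwd_start_xmit(), attach the forwarding context and reuse the
 * regular transmit path, so txq locking, the watchdog, and the
 * stopped/frozen checks of the lower device all apply.
 */
static int example_fwd_xmit(struct sk_buff *skb, struct net_device *lowerdev,
			    void *fwd_priv)
{
	skb->dev = lowerdev;
	return dev_queue_xmit_accel(skb, fwd_priv);
}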
Diffstat (limited to 'net')
-rw-r--r--	net/core/dev.c           	29
-rw-r--r--	net/core/flow_dissector.c	10
-rw-r--r--	net/core/netpoll.c       	 2
-rw-r--r--	net/mac80211/iface.c     	 6
-rw-r--r--	net/sched/sch_generic.c  	 2
5 files changed, 30 insertions(+), 19 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4fc17221545d..0ce469e5ec80 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq, void *accel_priv)
+ struct netdev_queue *txq)
{
const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
@@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
dev_queue_xmit_nit(skb, dev);
skb_len = skb->len;
- if (accel_priv)
- rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv);
- else
- rc = ops->ndo_start_xmit(skb, dev);
+ rc = ops->ndo_start_xmit(skb, dev);
trace_net_dev_xmit(skb, rc, dev, skb_len);
- if (rc == NETDEV_TX_OK && txq)
+ if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
}
@@ -2627,10 +2624,7 @@ gso:
dev_queue_xmit_nit(nskb, dev);
skb_len = nskb->len;
- if (accel_priv)
- rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv);
- else
- rc = ops->ndo_start_xmit(nskb, dev);
+ rc = ops->ndo_start_xmit(nskb, dev);
trace_net_dev_xmit(nskb, rc, dev, skb_len);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
@@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-int dev_queue_xmit(struct sk_buff *skb)
+int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb)
skb_update_prio(skb);
- txq = netdev_pick_tx(dev, skb);
+ txq = netdev_pick_tx(dev, skb, accel_priv);
q = rcu_dereference_bh(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
@@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb)
if (!netif_xmit_stopped(txq)) {
__this_cpu_inc(xmit_recursion);
- rc = dev_hard_start_xmit(skb, dev, txq, NULL);
+ rc = dev_hard_start_xmit(skb, dev, txq);
__this_cpu_dec(xmit_recursion);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
@@ -2892,8 +2886,19 @@ out:
rcu_read_unlock_bh();
return rc;
}
+
+int dev_queue_xmit(struct sk_buff *skb)
+{
+ return __dev_queue_xmit(skb, NULL);
+}
EXPORT_SYMBOL(dev_queue_xmit);
+int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
+{
+ return __dev_queue_xmit(skb, accel_priv);
+}
+EXPORT_SYMBOL(dev_queue_xmit_accel);
+
/*=======================================================================
Receiver routines
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d6ef17322500..2fc5beaf5783 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
EXPORT_SYMBOL(__netdev_pick_tx);
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
{
int queue_index = 0;
if (dev->real_num_tx_queues != 1) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb);
+ queue_index = ops->ndo_select_queue(dev, skb,
+ accel_priv);
else
queue_index = __netdev_pick_tx(dev, skb);
- queue_index = dev_cap_txqueue(dev, queue_index);
+
+ if (!accel_priv)
+ queue_index = dev_cap_txqueue(dev, queue_index);
}
skb_set_queue_mapping(skb, queue_index);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 303097874633..19fe9c717ced 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
struct netdev_queue *txq;
- txq = netdev_pick_tx(dev, skb);
+ txq = netdev_pick_tx(dev, skb, NULL);
/* try until next clock tick */
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 36c3a4cbcabf..a0757913046e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev)
}
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}
@@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
};
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ void *accel_priv)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 922a09406ba7..7fc899a943a8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_xmit_frozen_or_stopped(txq))
- ret = dev_hard_start_xmit(skb, dev, txq, NULL);
+ ret = dev_hard_start_xmit(skb, dev, txq);
HARD_TX_UNLOCK(dev, txq);
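Drivers that do not implement l2 forwarding offload only need the mechanical signature change, as in the mac80211 hunks above: accept the new accel_priv argument and ignore it. A sketch under that assumption (example_* names hypothetical):

static u16 example_legacy_select_queue(struct net_device *dev,
				       struct sk_buff *skb,
				       void *accel_priv)
{
	/* accel_priv is unused by drivers without dfwd offload;
	 * the existing queue selection logic is unchanged.
	 */
	return example_classify_queue(dev, skb);
}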