author    Jakub Kicinski <kuba@kernel.org>    2022-11-23 07:42:11 +0300
committer Jakub Kicinski <kuba@kernel.org>    2022-11-23 07:42:12 +0300
commit    af42736301cb8b4a949d3afa0dc9aaf89522601a (patch)
tree      de2e670bd88ecebaa0275006ca946a9ce335b290
parent    8263ee81f659ff773e5ee0a6f2adb9c6b5b4b0cc (diff)
parent    5e8d3dc73e800027f116ec964885d761d3c00777 (diff)
download  linux-af42736301cb8b4a949d3afa0dc9aaf89522601a.tar.xz
Merge branch 'revert-veth-avoid-drop-packets-when-xdp_redirect-performs-and-its-fix'
Heng Qi says:

====================
Revert "veth: Avoid drop packets when xdp_redirect performs" and its fix

Commit 2e0de6366ac16 enables NAPI on the peer veth automatically when
the veth loads an XDP program, but it breaks things as reported by
Paolo and John. So revert it and its fix; the patch will be reworked
and made more robust based on the review comments.
====================

Link: https://lore.kernel.org/r/20221122035015.19296-1-hengqi@linux.alibaba.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
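For readers skimming the diff below: the key line being restored is the
napi_already_on computation in veth_enable_xdp(), which goes back to deriving
NAPI state from the device's own GRO flag and IFF_UP rather than from the
peer's rq->napi pointer. The following is a minimal userspace model of that
restored check; struct veth_dev and napi_already_on() are illustrative
stand-ins, not kernel symbols.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the bits of device state veth_enable_xdp() looks at
 * after the revert; not the kernel's struct net_device. */
struct veth_dev {
	bool gro_requested;	/* veth_gro_requested(dev)  */
	bool up;		/* dev->flags & IFF_UP      */
};

/* Restored rule: NAPI counts as already running only when this device
 * itself requested GRO and is up.  The reverted patch instead probed
 * whether rq[0].napi had been populated. */
static bool napi_already_on(const struct veth_dev *dev)
{
	return dev->gro_requested && dev->up;
}

int main(void)
{
	struct veth_dev dev = { .gro_requested = false, .up = true };

	/* Without GRO, loading XDP has to bring NAPI up itself. */
	printf("napi_already_on = %d\n", napi_already_on(&dev));

	dev.gro_requested = true;
	/* With GRO requested on an up device, NAPI is already running. */
	printf("napi_already_on = %d\n", napi_already_on(&dev));
	return 0;
}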
-rw-r--r--  drivers/net/veth.c | 88
1 file changed, 12 insertions(+), 76 deletions(-)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2a4592780141..ac7c0653695f 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1119,14 +1119,10 @@ static void veth_disable_xdp_range(struct net_device *dev, int start, int end,
static int veth_enable_xdp(struct net_device *dev)
{
+ bool napi_already_on = veth_gro_requested(dev) && (dev->flags & IFF_UP);
struct veth_priv *priv = netdev_priv(dev);
- bool napi_already_on;
- struct veth_rq *rq;
int err, i;
- rq = &priv->rq[0];
- napi_already_on = rcu_access_pointer(rq->napi);
-
if (!xdp_rxq_info_is_reg(&priv->rq[0].xdp_rxq)) {
err = veth_enable_xdp_range(dev, 0, dev->real_num_rx_queues, napi_already_on);
if (err)
@@ -1327,28 +1323,18 @@ revert:
static int veth_open(struct net_device *dev)
{
- struct veth_priv *peer_priv, *priv = netdev_priv(dev);
+ struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
- struct veth_rq *peer_rq;
int err;
if (!peer)
return -ENOTCONN;
- peer_priv = netdev_priv(peer);
- peer_rq = &peer_priv->rq[0];
-
if (priv->_xdp_prog) {
err = veth_enable_xdp(dev);
if (err)
return err;
- /* refer to the logic in veth_xdp_set() */
- if (!rtnl_dereference(peer_rq->napi)) {
- err = veth_napi_enable(peer);
- if (err)
- return err;
- }
- } else if (veth_gro_requested(dev) || peer_priv->_xdp_prog) {
+ } else if (veth_gro_requested(dev)) {
err = veth_napi_enable(dev);
if (err)
return err;
@@ -1364,29 +1350,17 @@ static int veth_open(struct net_device *dev)
static int veth_close(struct net_device *dev)
{
- struct veth_priv *peer_priv, *priv = netdev_priv(dev);
+ struct veth_priv *priv = netdev_priv(dev);
struct net_device *peer = rtnl_dereference(priv->peer);
- struct veth_rq *peer_rq;
netif_carrier_off(dev);
- if (peer) {
- peer_priv = netdev_priv(peer);
- peer_rq = &peer_priv->rq[0];
- }
+ if (peer)
+ netif_carrier_off(peer);
- if (priv->_xdp_prog) {
+ if (priv->_xdp_prog)
veth_disable_xdp(dev);
- /* refer to the logic in veth_xdp_set */
- if (peer && rtnl_dereference(peer_rq->napi)) {
- if (!veth_gro_requested(peer) && !peer_priv->_xdp_prog)
- veth_napi_del(peer);
- }
- } else if (veth_gro_requested(dev) || (peer && peer_priv->_xdp_prog)) {
+ else if (veth_gro_requested(dev))
veth_napi_del(dev);
- }
-
- if (peer)
- netif_carrier_off(peer);
return 0;
}
@@ -1496,21 +1470,17 @@ static int veth_set_features(struct net_device *dev,
{
netdev_features_t changed = features ^ dev->features;
struct veth_priv *priv = netdev_priv(dev);
- struct veth_rq *rq = &priv->rq[0];
int err;
if (!(changed & NETIF_F_GRO) || !(dev->flags & IFF_UP) || priv->_xdp_prog)
return 0;
if (features & NETIF_F_GRO) {
- if (!rtnl_dereference(rq->napi)) {
- err = veth_napi_enable(dev);
- if (err)
- return err;
- }
+ err = veth_napi_enable(dev);
+ if (err)
+ return err;
} else {
- if (rtnl_dereference(rq->napi))
- veth_napi_del(dev);
+ veth_napi_del(dev);
}
return 0;
}
@@ -1542,19 +1512,14 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct veth_priv *priv = netdev_priv(dev);
- struct veth_priv *peer_priv;
struct bpf_prog *old_prog;
- struct veth_rq *peer_rq;
struct net_device *peer;
- bool napi_already_off;
unsigned int max_mtu;
- bool noreq_napi;
int err;
old_prog = priv->_xdp_prog;
priv->_xdp_prog = prog;
peer = rtnl_dereference(priv->peer);
- peer_priv = netdev_priv(peer);
if (prog) {
if (!peer) {
@@ -1591,24 +1556,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
}
- if (peer && (peer->flags & IFF_UP)) {
- peer_rq = &peer_priv->rq[0];
-
- /* If the peer hasn't enabled GRO and loaded xdp,
- * then we enable napi automatically if its napi
- * is not ready.
- */
- napi_already_off = !rtnl_dereference(peer_rq->napi);
- if (napi_already_off) {
- err = veth_napi_enable(peer);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack,
- "Failed to automatically enable napi of peer");
- goto err;
- }
- }
- }
-
if (!old_prog) {
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
@@ -1623,17 +1570,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;
- peer_rq = &peer_priv->rq[0];
-
- /* If the peer doesn't has its xdp and enabled
- * GRO, then we disable napi if its napi is ready;
- */
- if (rtnl_dereference(peer_rq->napi)) {
- noreq_napi = !veth_gro_requested(peer) &&
- !peer_priv->_xdp_prog;
- if (noreq_napi && (peer->flags & IFF_UP))
- veth_napi_del(peer);
- }
}
}
bpf_prog_put(old_prog);
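Taken together, the hunks above restore a simpler rule: whether a veth runs
NAPI is decided purely from that device's own state (its XDP program and its
GRO flag); the peer is never consulted in veth_open(), veth_close() or
veth_xdp_set(). Below is a rough stand-alone sketch of the restored open/close
decisions, again using illustrative stand-in types rather than kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the per-device state veth_open() and
 * veth_close() look at after the revert; not the kernel's struct
 * veth_priv. */
struct veth_state {
	bool has_xdp_prog;	/* priv->_xdp_prog != NULL  */
	bool gro_requested;	/* veth_gro_requested(dev)  */
};

/* Restored veth_open() rule: enable the XDP path (which brings NAPI up
 * itself) when a program is attached, otherwise enable NAPI only when
 * GRO was requested on this device.  The peer is no longer consulted. */
static const char *open_action(const struct veth_state *s)
{
	if (s->has_xdp_prog)
		return "veth_enable_xdp(dev)";
	if (s->gro_requested)
		return "veth_napi_enable(dev)";
	return "no NAPI needed";
}

/* Restored veth_close() rule mirrors veth_open(). */
static const char *close_action(const struct veth_state *s)
{
	if (s->has_xdp_prog)
		return "veth_disable_xdp(dev)";
	if (s->gro_requested)
		return "veth_napi_del(dev)";
	return "nothing to tear down";
}

int main(void)
{
	struct veth_state plain = { false, false };
	struct veth_state gro   = { false, true  };
	struct veth_state xdp   = { true,  false };

	printf("plain veth: open -> %s, close -> %s\n",
	       open_action(&plain), close_action(&plain));
	printf("GRO veth:   open -> %s, close -> %s\n",
	       open_action(&gro), close_action(&gro));
	printf("XDP veth:   open -> %s, close -> %s\n",
	       open_action(&xdp), close_action(&xdp));
	return 0;
}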