path: root/net/mac80211/mesh_hwmp.c
author    Javier Cardona <javier@cozybit.com>        2011-11-04 08:11:10 +0400
committer John W. Linville <linville@tuxdriver.com>  2011-11-10 01:01:02 +0400
commit    f3011cf9deb689bd68279c728c501a4166983c19 (patch)
tree      7d9945a2934c6de78063c358a6d350af8b6a19a9 /net/mac80211/mesh_hwmp.c
parent    7e1e386421e2ec7804b77f2c1c8e2517e82ecb7e (diff)
download  linux-f3011cf9deb689bd68279c728c501a4166983c19.tar.xz
mac80211: Avoid filling up mesh preq queue with redundant requests
Don't accept redundant PREQs for a given destination. This fixes a
problem under high load:

kernel: [20386.250913] mesh_queue_preq: 235 callbacks suppressed
kernel: [20386.253335] Mesh HWMP (mesh0): PREQ node queue full
kernel: [20386.253352] Mesh HWMP (mesh0): PREQ node queue full
(...)

The 802.11s protocol has a provision to limit the rate at which path
requests (PREQs) are transmitted (dot11MeshHWMPpreqMinInterval), but
there was no limit on the rate at which PREQs were being queued up.

There is a valid reason for queuing PREQs: this way we can even out
PREQ bursts. But queueing multiple PREQs for the same destination is
useless.

Reported-by: Pedro Larbig <pedro.larbig@carhs.de>
Signed-off-by: Javier Cardona <javier@cozybit.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
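The idea is small enough to model outside the kernel. The sketch below is a minimal, single-threaded userspace rendition of the same guard: a per-destination "request already queued" bit is set when a PREQ is enqueued and cleared when it is serviced, so repeated lookups toward one destination can no longer fill the queue. All names here (path_entry, preq_enqueue, PREQ_QUEUED, MAX_QUEUE) are made up for illustration and are not the mac80211 symbols; the real code additionally holds mpath->state_lock and ifmsh->mesh_preq_queue_lock around these steps.

/*
 * Simplified, userspace-only sketch of the idea in this patch: a request
 * may be queued for a destination only if no request for that destination
 * is already pending.  Names are illustrative, not the mac80211 symbols.
 */
#include <stdbool.h>
#include <stdio.h>

#define PREQ_QUEUED	0x01	/* "a PREQ for this path is already queued" */
#define MAX_QUEUE	64

struct path_entry {
	const char *dst;	/* destination address (string for brevity) */
	unsigned int flags;	/* state bits, including PREQ_QUEUED */
};

static struct path_entry *queue[MAX_QUEUE];
static int queue_len;

/* Queue a path request unless one is already pending for this path. */
static bool preq_enqueue(struct path_entry *p)
{
	if (queue_len == MAX_QUEUE)
		return false;		/* queue full, drop the request */
	if (p->flags & PREQ_QUEUED)
		return false;		/* redundant request, nothing to do */

	p->flags |= PREQ_QUEUED;
	queue[queue_len++] = p;
	return true;
}

/* Service the oldest request and allow a new one to be queued later. */
static struct path_entry *preq_dequeue(void)
{
	struct path_entry *p;
	int i;

	if (!queue_len)
		return NULL;

	p = queue[0];
	for (i = 1; i < queue_len; i++)
		queue[i - 1] = queue[i];
	queue_len--;

	p->flags &= ~PREQ_QUEUED;	/* cleared once the request is serviced */
	return p;
}

int main(void)
{
	struct path_entry a = { .dst = "node-a" };

	printf("first enqueue:  %d\n", preq_enqueue(&a));	/* 1: accepted */
	printf("second enqueue: %d\n", preq_enqueue(&a));	/* 0: redundant */
	preq_dequeue();
	printf("after dequeue:  %d\n", preq_enqueue(&a));	/* 1: accepted again */
	return 0;
}

Running the sketch prints 1, 0, 1: a second request for the same destination is rejected until the first one has been dequeued, which is exactly the behavior the patch adds for PREQs.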
Diffstat (limited to 'net/mac80211/mesh_hwmp.c')
-rw-r--r--	net/mac80211/mesh_hwmp.c	15
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 9a1f8bbc49b8..b22b223ccde1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -867,9 +867,19 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
 		return;
 	}
 
+	spin_lock_bh(&mpath->state_lock);
+	if (mpath->flags & MESH_PATH_REQ_QUEUED) {
+		spin_unlock_bh(&mpath->state_lock);
+		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
+		return;
+	}
+
 	memcpy(preq_node->dst, mpath->dst, ETH_ALEN);
 	preq_node->flags = flags;
 
+	mpath->flags |= MESH_PATH_REQ_QUEUED;
+	spin_unlock_bh(&mpath->state_lock);
+
 	list_add_tail(&preq_node->list, &ifmsh->preq_queue.list);
 	++ifmsh->preq_queue_len;
 	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
@@ -921,6 +931,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
 		goto enddiscovery;
 
 	spin_lock_bh(&mpath->state_lock);
+	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
 	if (preq_node->flags & PREQ_Q_F_START) {
 		if (mpath->flags & MESH_PATH_RESOLVING) {
 			spin_unlock_bh(&mpath->state_lock);
@@ -1028,8 +1039,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
 			mesh_queue_preq(mpath, PREQ_Q_F_START);
 		}
 
-		if (skb_queue_len(&mpath->frame_queue) >=
-				MESH_FRAME_QUEUE_LEN)
+		if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
 			skb_to_free = skb_dequeue(&mpath->frame_queue);
 
 		info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
@@ -1061,6 +1071,7 @@ void mesh_path_timer(unsigned long data)
 	} else if (mpath->discovery_retries < max_preq_retries(sdata)) {
 		++mpath->discovery_retries;
 		mpath->discovery_timeout *= 2;
+		mpath->flags &= ~MESH_PATH_REQ_QUEUED;
 		spin_unlock_bh(&mpath->state_lock);
 		mesh_queue_preq(mpath, 0);
 	} else {
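The two clearing hunks are what keep the scheme from sticking: MESH_PATH_REQ_QUEUED is dropped in mesh_path_start_discovery() once the queued PREQ has been taken off the queue for processing, and again in mesh_path_timer() before a discovery retry calls mesh_queue_preq(), so a path only carries the bit while a request for it actually sits in the queue.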