author    Colin Ian King <colin.i.king@intel.com>    2024-02-16 02:21:51 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2024-05-02 17:32:33 +0300
commit    1ba4d2adf1aafb3917d4cdec314757e96cbca634 (patch)
tree      f81efab89b325d6d6f243348f62fca4cb43cb9ec /net/mac80211
parent    80ea4ee9f99c28d150ab376437e66c04d4769863 (diff)
download  linux-1ba4d2adf1aafb3917d4cdec314757e96cbca634.tar.xz
wifi: mac80211: clean up assignments to pointer cache.
[ Upstream commit ba4b1fa3128b2fbf14e167230315cbd9074b629b ]

The assignment to pointer cache in function mesh_fast_tx_gc can be made
at declaration time rather than with a later assignment. There are also
three functions where pointer cache is initialized at declaration time
and later re-assigned the same value; these redundant assignments can be
removed.

Cleans up the code and three clang scan-build warnings:

warning: Value stored to 'cache' during its initialization is never read [deadcode.DeadStores]

Signed-off-by: Colin Ian King <colin.i.king@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Link: https://msgid.link/20240215232151.2075483-1-colin.i.king@gmail.com
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Stable-dep-of: 8c75cdcdf869 ("wifi: mac80211: split mesh fast tx cache into local/proxied/forwarded")
Signed-off-by: Sasha Levin <sashal@kernel.org>
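For readers unfamiliar with the analyzer finding, here is a minimal standalone
sketch of the dead-store pattern it flags and of the cleaned-up form used by
this patch; the struct, field and function names below are invented for
illustration and are not the mac80211 types.

/* Illustrative sketch only; 'struct ctx' and friends are made-up types. */
#include <stdio.h>

struct tx_cache { int nelems; };
struct ctx { struct tx_cache cache; };

/* Before: the initializer is a dead store; clang --analyze reports
 * "Value stored to 'cache' during its initialization is never read". */
static int count_before(struct ctx *c)
{
        struct tx_cache *cache = &c->cache;     /* value never read ...        */

        cache = &c->cache;                      /* ... because it is re-assigned here */
        return cache->nelems;
}

/* After: assign once, at declaration time. */
static int count_after(struct ctx *c)
{
        struct tx_cache *cache = &c->cache;

        return cache->nelems;
}

int main(void)
{
        struct ctx c = { .cache = { .nelems = 3 } };

        printf("%d %d\n", count_before(&c), count_after(&c));
        return 0;
}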
Diffstat (limited to 'net/mac80211')
-rw-r--r--    net/mac80211/mesh_pathtbl.c    6
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 3e52aaa57b1f..c7173190f9b9 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -595,11 +595,10 @@ unlock_sta:
 void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
 {
         unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
-        struct mesh_tx_cache *cache;
+        struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
         struct ieee80211_mesh_fast_tx *entry;
         struct hlist_node *n;
 
-        cache = &sdata->u.mesh.tx_cache;
         if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
                 return;
 
@@ -617,7 +616,6 @@ void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
         struct ieee80211_mesh_fast_tx *entry;
         struct hlist_node *n;
 
-        cache = &sdata->u.mesh.tx_cache;
         spin_lock_bh(&cache->walk_lock);
         hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
                 if (entry->mpath == mpath)
@@ -632,7 +630,6 @@ void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
         struct ieee80211_mesh_fast_tx *entry;
         struct hlist_node *n;
 
-        cache = &sdata->u.mesh.tx_cache;
         spin_lock_bh(&cache->walk_lock);
         hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
                 if (rcu_access_pointer(entry->mpath->next_hop) == sta)
@@ -646,7 +643,6 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
         struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
         struct ieee80211_mesh_fast_tx *entry;
 
-        cache = &sdata->u.mesh.tx_cache;
         spin_lock_bh(&cache->walk_lock);
         entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
         if (entry)