author		Felix Fietkau <nbd@nbd.name>	2021-01-27 08:57:32 +0300
committer	Johannes Berg <johannes.berg@intel.com>	2021-02-12 10:57:24 +0300
commit		7aece471a0e6e3cb84a89ce09de075c91f58d357 (patch)
tree		a882e99c8953ec24ac565e7f72107767d5309e5e /net/mac80211/rc80211_minstrel_ht.c
parent		2012a2f7bcd2aa515430a75f1227471ab4ebd7df (diff)
download	linux-7aece471a0e6e3cb84a89ce09de075c91f58d357.tar.xz
mac80211: minstrel_ht: reduce the need to sample slower rates
In order to be able to fall back to lower rates more gracefully and without too much throughput fluctuation, initialize all untested rates below tested ones to the maximum probability of the higher rates. Usually this leads to untested lower rates being initialized with a probability value of 100%, making them better candidates for fallback without having to rely on random probing.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Link: https://lore.kernel.org/r/20210127055735.78599-3-nbd@nbd.name
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
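For readers who want to see the seeding pass in isolation, the following is a minimal, self-contained sketch of the idea, not the mac80211 code: struct rate_stats, its fields and seed_untested_rates() are hypothetical simplifications of the per-rate minstrel_ht state, and probabilities are plain 0..100 integers rather than MINSTREL_FRAC fixed-point values.

#include <stdio.h>

#define NUM_RATES 8

struct rate_stats {
	unsigned int att_hist;	/* historical attempts; 0 means never tested */
	unsigned int prob_avg;	/* averaged success probability, 0..100 */
};

static void seed_untested_rates(struct rate_stats rates[NUM_RATES])
{
	unsigned int last_prob = 0;
	int i;

	/* Walk from the fastest rate (highest index) down to the slowest. */
	for (i = NUM_RATES - 1; i >= 0; i--) {
		if (rates[i].att_hist) {
			/* Tested rate: remember the best probability seen so far. */
			if (rates[i].prob_avg > last_prob)
				last_prob = rates[i].prob_avg;
		} else if (rates[i].prob_avg < last_prob) {
			/* Untested rate: inherit the best probability of faster rates. */
			rates[i].prob_avg = last_prob;
		}
	}
}

int main(void)
{
	/* Index 0 is the slowest rate; only the two fastest rates were tested. */
	struct rate_stats rates[NUM_RATES] = {
		{0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0},
		{20, 95}, {30, 60},
	};
	int i;

	seed_untested_rates(rates);

	for (i = 0; i < NUM_RATES; i++)
		printf("rate %d: prob_avg = %u%%\n", i, rates[i].prob_avg);
	return 0;
}

With the sample data above, the six untested slower rates inherit the 95% probability of the best tested rate, which is what makes them usable fallback candidates without explicit probing.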
Diffstat (limited to 'net/mac80211/rc80211_minstrel_ht.c')
-rw-r--r--	net/mac80211/rc80211_minstrel_ht.c	18
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 7846782840a9..4d4eb4aa46bd 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -791,14 +791,11 @@ minstrel_ht_calc_rate_stats(struct minstrel_priv *mp,
 	unsigned int cur_prob;
 	if (unlikely(mrs->attempts > 0)) {
-		mrs->sample_skipped = 0;
 		cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
 		minstrel_filter_avg_add(&mrs->prob_avg,
					&mrs->prob_avg_1, cur_prob);
 		mrs->att_hist += mrs->attempts;
 		mrs->succ_hist += mrs->success;
-	} else {
-		mrs->sample_skipped++;
 	}
 	mrs->last_success = mrs->success;
@@ -851,7 +848,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 		mi->ampdu_packets = 0;
 	}
-	mi->sample_slow = 0;
 	mi->sample_count = 0;
 	if (mi->supported[MINSTREL_CCK_GROUP])
@@ -882,6 +878,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 	/* Find best rate sets within all MCS groups*/
 	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
 		u16 *tp_rate = tmp_mcs_tp_rate;
+		u16 last_prob = 0;
 		mg = &mi->groups[group];
 		if (!mi->supported[group])
@@ -896,7 +893,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 		if (group == MINSTREL_CCK_GROUP && ht_supported)
			tp_rate = tmp_legacy_tp_rate;
-		for (i = 0; i < MCS_GROUP_RATES; i++) {
+		for (i = MCS_GROUP_RATES - 1; i >= 0; i--) {
			if (!(mi->supported[group] & BIT(i)))
				continue;
@@ -905,6 +902,11 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
			mrs = &mg->rates[i];
			mrs->retry_updated = false;
			minstrel_ht_calc_rate_stats(mp, mrs);
+
+			if (mrs->att_hist)
+				last_prob = max(last_prob, mrs->prob_avg);
+			else
+				mrs->prob_avg = max(last_prob, mrs->prob_avg);
			cur_prob = mrs->prob_avg;
			if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
@@ -1469,13 +1471,9 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
 	    (cur_max_tp_streams - 1 <
 	     minstrel_mcs_groups[sample_group].streams ||
-	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
-		if (mrs->sample_skipped < 20)
+	     sample_dur >= minstrel_get_duration(mi->max_prob_rate)))
			return -1;
-		if (mi->sample_slow++ > 2)
-			return -1;
-	}
 	mi->sample_tries--;
 	return sample_idx;
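For completeness, here is a hedged sketch of the sampling decision that remains after the last hunk: since untested slower rates now carry a realistic probability estimate, the sample_skipped/sample_slow escape hatch is dropped and a slow sample candidate is simply rejected. This is not the kernel implementation; the function name, parameters and the plain duration values below are illustrative stand-ins, where a "duration" corresponds to the airtime estimate that minstrel_get_duration() would return.

#include <stdbool.h>
#include <stdio.h>

/* Return true if a slow sample candidate should be rejected outright. */
static bool should_skip_slow_sample(unsigned int sample_dur,
				    unsigned int tp_rate2_dur,
				    unsigned int max_prob_dur,
				    unsigned int sample_streams,
				    unsigned int cur_max_tp_streams)
{
	return sample_dur >= tp_rate2_dur &&
	       (cur_max_tp_streams - 1 < sample_streams ||
		sample_dur >= max_prob_dur);
}

int main(void)
{
	/* Slower than both the 2nd-best throughput and max-probability rates,
	 * without using fewer streams: rejected. */
	printf("skip: %d\n", should_skip_slow_sample(400, 300, 350, 1, 1));

	/* Faster than the 2nd-best throughput rate: always worth sampling. */
	printf("skip: %d\n", should_skip_slow_sample(200, 300, 350, 1, 1));
	return 0;
}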