linux-stable/net/mac80211/rc80211_minstrel_ht.c

// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010-2013 Felix Fietkau <nbd@openwrt.org>
* Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/debugfs.h>
#include <linux/random.h>
#include <linux/moduleparam.h>
#include <linux/ieee80211.h>
#include <linux/minmax.h>
#include <net/mac80211.h>
#include "rate.h"
#include "sta_info.h"
#include "rc80211_minstrel_ht.h"
#define AVG_AMPDU_SIZE 16
#define AVG_PKT_SIZE 1200
/* Number of bits for an average sized packet */
#define MCS_NBITS ((AVG_PKT_SIZE * AVG_AMPDU_SIZE) << 3)
/* Number of symbols for a packet with (bps) bits per symbol */
#define MCS_NSYMS(bps) DIV_ROUND_UP(MCS_NBITS, (bps))
/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
#define MCS_SYMBOL_TIME(sgi, syms) \
(sgi ? \
((syms) * 18000 + 4000) / 5 : /* syms * 3.6 us */ \
((syms) * 1000) << 2 /* syms * 4 us */ \
)
/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) \
(MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) / AVG_AMPDU_SIZE)
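/*
 * Example: one stream, no SGI, 20 MHz MCS0 (26 data bits per symbol):
 * MCS_NBITS = 1200 * 16 * 8 = 153600 bits, MCS_NSYMS(26) = 5908 symbols,
 * MCS_SYMBOL_TIME = 5908 * 4 us = 23632 us for the whole A-MPDU, so
 * MCS_DURATION = 23632 us / 16 frames ~= 1477 us of airtime per packet.
 */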
#define BW_20 0
#define BW_40 1
#define BW_80 2
/*
* Define group sort order: HT40 -> SGI -> #streams
*/
#define GROUP_IDX(_streams, _sgi, _ht40) \
MINSTREL_HT_GROUP_0 + \
MINSTREL_MAX_STREAMS * 2 * _ht40 + \
MINSTREL_MAX_STREAMS * _sgi + \
_streams - 1
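/*
 * e.g. GROUP_IDX(3, 1, 1), assuming MINSTREL_MAX_STREAMS == 4 to match the
 * four stream groups below: MINSTREL_HT_GROUP_0 + 8 (HT40) + 4 (SGI) + 2
 */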
#define _MAX(a, b) (((a)>(b))?(a):(b))
#define GROUP_SHIFT(duration) \
_MAX(0, 16 - __builtin_clz(duration))
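/*
 * GROUP_SHIFT picks a right shift so that the longest duration in a group
 * (its slowest rate) fits into the 16-bit duration table entries: a value
 * occupying (32 - __builtin_clz(duration)) bits is shifted down by
 * (bits - 16), or not shifted at all if it already fits.
 */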
/* MCS rate information for an MCS group */
#define __MCS_GROUP(_streams, _sgi, _ht40, _s) \
[GROUP_IDX(_streams, _sgi, _ht40)] = { \
.streams = _streams, \
.shift = _s, \
.bw = _ht40, \
.flags = \
IEEE80211_TX_RC_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
(_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234) >> _s, \
MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) >> _s \
} \
}
#define MCS_GROUP_SHIFT(_streams, _sgi, _ht40) \
GROUP_SHIFT(MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26))
#define MCS_GROUP(_streams, _sgi, _ht40) \
__MCS_GROUP(_streams, _sgi, _ht40, \
MCS_GROUP_SHIFT(_streams, _sgi, _ht40))
#define VHT_GROUP_IDX(_streams, _sgi, _bw) \
(MINSTREL_VHT_GROUP_0 + \
MINSTREL_MAX_STREAMS * 2 * (_bw) + \
MINSTREL_MAX_STREAMS * (_sgi) + \
(_streams) - 1)
#define BW2VBPS(_bw, r3, r2, r1) \
(_bw == BW_80 ? r3 : _bw == BW_40 ? r2 : r1)
#define __VHT_GROUP(_streams, _sgi, _bw, _s) \
[VHT_GROUP_IDX(_streams, _sgi, _bw)] = { \
.streams = _streams, \
.shift = _s, \
.bw = _bw, \
.flags = \
IEEE80211_TX_RC_VHT_MCS | \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
(_bw == BW_80 ? IEEE80211_TX_RC_80_MHZ_WIDTH : \
_bw == BW_40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
.duration = { \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 117, 54, 26)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 234, 108, 52)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 351, 162, 78)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 468, 216, 104)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 702, 324, 156)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 936, 432, 208)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 1053, 486, 234)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 1170, 540, 260)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 1404, 648, 312)) >> _s, \
MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 1560, 720, 346)) >> _s \
} \
}
#define VHT_GROUP_SHIFT(_streams, _sgi, _bw) \
GROUP_SHIFT(MCS_DURATION(_streams, _sgi, \
BW2VBPS(_bw, 117, 54, 26)))
#define VHT_GROUP(_streams, _sgi, _bw) \
__VHT_GROUP(_streams, _sgi, _bw, \
VHT_GROUP_SHIFT(_streams, _sgi, _bw))
#define CCK_DURATION(_bitrate, _short) \
(1000 * (10 /* SIFS */ + \
(_short ? 72 + 24 : 144 + 48) + \
(8 * (AVG_PKT_SIZE + 4) * 10) / (_bitrate)))
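/*
 * _bitrate is in units of 100 kbit/s. Example for 1 Mbit/s (10) with long
 * preamble: 10 us SIFS + (144 + 48) us preamble/PLCP +
 * 8 * 1204 bytes * 10 / 10 = 9632 us of payload, ~9834 us in total.
 */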
#define CCK_DURATION_LIST(_short, _s) \
CCK_DURATION(10, _short) >> _s, \
CCK_DURATION(20, _short) >> _s, \
CCK_DURATION(55, _short) >> _s, \
CCK_DURATION(110, _short) >> _s
#define __CCK_GROUP(_s) \
[MINSTREL_CCK_GROUP] = { \
.streams = 1, \
.flags = 0, \
.shift = _s, \
.duration = { \
CCK_DURATION_LIST(false, _s), \
CCK_DURATION_LIST(true, _s) \
} \
}
#define CCK_GROUP_SHIFT \
GROUP_SHIFT(CCK_DURATION(10, false))
#define CCK_GROUP __CCK_GROUP(CCK_GROUP_SHIFT)
#define OFDM_DURATION(_bitrate) \
(1000 * (16 /* SIFS + signal ext */ + \
16 /* T_PREAMBLE */ + \
4 /* T_SIGNAL */ + \
4 * (((16 + 80 * (AVG_PKT_SIZE + 4) + 6) / \
((_bitrate) * 4)))))
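/*
 * _bitrate is in units of 100 kbit/s; one 4 us OFDM symbol carries
 * _bitrate * 4 / 10 data bits. Example for 6 Mbit/s (60):
 * 4 us * ((16 + 80 * 1204 + 6) / 240) = 1604 us of payload plus
 * 36 us of fixed overhead, ~1640 us in total.
 */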
#define OFDM_DURATION_LIST(_s) \
OFDM_DURATION(60) >> _s, \
OFDM_DURATION(90) >> _s, \
OFDM_DURATION(120) >> _s, \
OFDM_DURATION(180) >> _s, \
OFDM_DURATION(240) >> _s, \
OFDM_DURATION(360) >> _s, \
OFDM_DURATION(480) >> _s, \
OFDM_DURATION(540) >> _s
#define __OFDM_GROUP(_s) \
[MINSTREL_OFDM_GROUP] = { \
.streams = 1, \
.flags = 0, \
.shift = _s, \
.duration = { \
OFDM_DURATION_LIST(_s), \
} \
}
#define OFDM_GROUP_SHIFT \
GROUP_SHIFT(OFDM_DURATION(60))
#define OFDM_GROUP __OFDM_GROUP(OFDM_GROUP_SHIFT)
static bool minstrel_vht_only = true;
module_param(minstrel_vht_only, bool, 0644);
MODULE_PARM_DESC(minstrel_vht_only,
"Use only VHT rates when VHT is supported by sta.");
/*
* To enable sufficiently targeted rate sampling, MCS rates are divided into
* groups, based on the number of streams and flags (HT40, SGI) that they
* use.
*
* The sort order has to be fixed for the GROUP_IDX macro to be applicable:
* BW -> SGI -> #streams
*/
const struct mcs_group minstrel_mcs_groups[] = {
MCS_GROUP(1, 0, BW_20),
MCS_GROUP(2, 0, BW_20),
MCS_GROUP(3, 0, BW_20),
MCS_GROUP(4, 0, BW_20),
MCS_GROUP(1, 1, BW_20),
MCS_GROUP(2, 1, BW_20),
MCS_GROUP(3, 1, BW_20),
MCS_GROUP(4, 1, BW_20),
MCS_GROUP(1, 0, BW_40),
MCS_GROUP(2, 0, BW_40),
MCS_GROUP(3, 0, BW_40),
MCS_GROUP(4, 0, BW_40),
MCS_GROUP(1, 1, BW_40),
MCS_GROUP(2, 1, BW_40),
MCS_GROUP(3, 1, BW_40),
MCS_GROUP(4, 1, BW_40),
CCK_GROUP,
OFDM_GROUP,
VHT_GROUP(1, 0, BW_20),
VHT_GROUP(2, 0, BW_20),
VHT_GROUP(3, 0, BW_20),
VHT_GROUP(4, 0, BW_20),
VHT_GROUP(1, 1, BW_20),
VHT_GROUP(2, 1, BW_20),
VHT_GROUP(3, 1, BW_20),
VHT_GROUP(4, 1, BW_20),
VHT_GROUP(1, 0, BW_40),
VHT_GROUP(2, 0, BW_40),
VHT_GROUP(3, 0, BW_40),
VHT_GROUP(4, 0, BW_40),
VHT_GROUP(1, 1, BW_40),
VHT_GROUP(2, 1, BW_40),
VHT_GROUP(3, 1, BW_40),
VHT_GROUP(4, 1, BW_40),
VHT_GROUP(1, 0, BW_80),
VHT_GROUP(2, 0, BW_80),
VHT_GROUP(3, 0, BW_80),
VHT_GROUP(4, 0, BW_80),
VHT_GROUP(1, 1, BW_80),
VHT_GROUP(2, 1, BW_80),
VHT_GROUP(3, 1, BW_80),
VHT_GROUP(4, 1, BW_80),
};
const s16 minstrel_cck_bitrates[4] = { 10, 20, 55, 110 };
const s16 minstrel_ofdm_bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 };
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
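/* Sampling order within a stats update interval: 3x INC, 2x JUMP, 1x SLOW */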
static const u8 minstrel_sample_seq[] = {
MINSTREL_SAMPLE_TYPE_INC,
MINSTREL_SAMPLE_TYPE_JUMP,
MINSTREL_SAMPLE_TYPE_INC,
MINSTREL_SAMPLE_TYPE_JUMP,
MINSTREL_SAMPLE_TYPE_INC,
MINSTREL_SAMPLE_TYPE_SLOW,
};
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
/*
* Some VHT MCSes are invalid (when Ndbps / Nes is not an integer)
* e.g. for MCS9@20MHzx1Nss: Ndbps = 8x52*(5/6), Nes = 1
*
* Returns the valid mcs map for struct minstrel_mcs_group_data.supported
*/
static u16
minstrel_get_valid_vht_rates(int bw, int nss, __le16 mcs_map)
{
u16 mask = 0;
if (bw == BW_20) {
if (nss != 3 && nss != 6)
mask = BIT(9);
} else if (bw == BW_80) {
if (nss == 3 || nss == 7)
mask = BIT(6);
else if (nss == 6)
mask = BIT(9);
} else {
WARN_ON(bw != BW_40);
}
switch ((le16_to_cpu(mcs_map) >> (2 * (nss - 1))) & 3) {
case IEEE80211_VHT_MCS_SUPPORT_0_7:
mask |= 0x300;
break;
case IEEE80211_VHT_MCS_SUPPORT_0_8:
mask |= 0x200;
break;
case IEEE80211_VHT_MCS_SUPPORT_0_9:
break;
default:
mask = 0x3ff;
}
return 0x3ff & ~mask;
}
static bool
minstrel_ht_is_legacy_group(int group)
{
return group == MINSTREL_CCK_GROUP ||
group == MINSTREL_OFDM_GROUP;
}
/*
* Look up an MCS group index based on mac80211 rate information
*/
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
return GROUP_IDX((rate->idx / 8) + 1,
!!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
/*
* Look up an MCS group index based on new cfg80211 rate_info.
*/
static int
minstrel_ht_ri_get_group_idx(struct rate_info *rate)
{
return GROUP_IDX((rate->mcs / 8) + 1,
!!(rate->flags & RATE_INFO_FLAGS_SHORT_GI),
!!(rate->bw & RATE_INFO_BW_40));
}
static int
minstrel_vht_get_group_idx(struct ieee80211_tx_rate *rate)
{
return VHT_GROUP_IDX(ieee80211_rate_get_vht_nss(rate),
!!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) +
2*!!(rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH));
}
/*
* Look up an MCS group index based on new cfg80211 rate_info.
*/
static int
minstrel_vht_ri_get_group_idx(struct rate_info *rate)
{
return VHT_GROUP_IDX(rate->nss,
!!(rate->flags & RATE_INFO_FLAGS_SHORT_GI),
!!(rate->bw & RATE_INFO_BW_40) +
2*!!(rate->bw & RATE_INFO_BW_80));
}
static struct minstrel_rate_stats *
minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_tx_rate *rate)
{
int group, idx;
if (rate->flags & IEEE80211_TX_RC_MCS) {
group = minstrel_ht_get_group_idx(rate);
idx = rate->idx % 8;
goto out;
}
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
group = minstrel_vht_get_group_idx(rate);
idx = ieee80211_rate_get_vht_mcs(rate);
goto out;
}
group = MINSTREL_CCK_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) {
if (!(mi->supported[group] & BIT(idx)))
continue;
if (rate->idx != mp->cck_rates[idx])
continue;
/* short preamble */
if ((mi->supported[group] & BIT(idx + 4)) &&
(rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE))
idx += 4;
goto out;
}
group = MINSTREL_OFDM_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->ofdm_rates[0]); idx++)
if (rate->idx == mp->ofdm_rates[mi->band][idx])
goto out;
idx = 0;
out:
return &mi->groups[group].rates[idx];
}
/*
* Get the minstrel rate statistics for specified STA and rate info.
*/
static struct minstrel_rate_stats *
minstrel_ht_ri_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_rate_status *rate_status)
{
int group, idx;
struct rate_info *rate = &rate_status->rate_idx;
if (rate->flags & RATE_INFO_FLAGS_MCS) {
group = minstrel_ht_ri_get_group_idx(rate);
idx = rate->mcs % 8;
goto out;
}
if (rate->flags & RATE_INFO_FLAGS_VHT_MCS) {
group = minstrel_vht_ri_get_group_idx(rate);
idx = rate->mcs;
goto out;
}
group = MINSTREL_CCK_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->cck_rates); idx++) {
if (rate->legacy != minstrel_cck_bitrates[ mp->cck_rates[idx] ])
continue;
/* short preamble */
if ((mi->supported[group] & BIT(idx + 4)) &&
mi->use_short_preamble)
idx += 4;
goto out;
}
group = MINSTREL_OFDM_GROUP;
for (idx = 0; idx < ARRAY_SIZE(mp->ofdm_rates[0]); idx++)
if (rate->legacy == minstrel_ofdm_bitrates[ mp->ofdm_rates[mi->band][idx] ])
goto out;
idx = 0;
out:
return &mi->groups[group].rates[idx];
}
static inline struct minstrel_rate_stats *
minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
{
return &mi->groups[MI_RATE_GROUP(index)].rates[MI_RATE_IDX(index)];
}
static inline int minstrel_get_duration(int index)
{
const struct mcs_group *group = &minstrel_mcs_groups[MI_RATE_GROUP(index)];
unsigned int duration = group->duration[MI_RATE_IDX(index)];
return duration << group->shift;
}
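/*
 * Estimate the average A-MPDU length: use the tracked average when
 * available, otherwise guess based on the airtime of the max throughput
 * rate (slower rates aggregate fewer frames), up to AVG_AMPDU_SIZE.
 */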
static unsigned int
minstrel_ht_avg_ampdu_len(struct minstrel_ht_sta *mi)
{
int duration;
if (mi->avg_ampdu_len)
return MINSTREL_TRUNC(mi->avg_ampdu_len);
if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_tp_rate[0])))
return 1;
duration = minstrel_get_duration(mi->max_tp_rate[0]);
if (duration > 400 * 1000)
return 2;
if (duration > 250 * 1000)
return 4;
if (duration > 150 * 1000)
return 8;
return 16;
}
/*
* Return current throughput based on the average A-MPDU length, taking into
* account the expected number of retransmissions and their expected length
*/
int
minstrel_ht_get_tp_avg(struct minstrel_ht_sta *mi, int group, int rate,
int prob_avg)
{
unsigned int nsecs = 0, overhead = mi->overhead;
unsigned int ampdu_len = 1;
/* do not account for throughput if success prob is below 10% */
if (prob_avg < MINSTREL_FRAC(10, 100))
return 0;
if (minstrel_ht_is_legacy_group(group))
overhead = mi->overhead_legacy;
else
ampdu_len = minstrel_ht_avg_ampdu_len(mi);
nsecs = 1000 * overhead / ampdu_len;
nsecs += minstrel_mcs_groups[group].duration[rate] <<
minstrel_mcs_groups[group].shift;
/*
* For the throughput calculation, limit the probability value to 90% to
* account for collision related packet error rate fluctuation
* (prob is scaled - see MINSTREL_FRAC above)
*/
if (prob_avg > MINSTREL_FRAC(90, 100))
prob_avg = MINSTREL_FRAC(90, 100);
return MINSTREL_TRUNC(100 * ((prob_avg * 1000000) / nsecs));
}
/*
* Find & sort topmost throughput rates
*
* If multiple rates provide equal throughput, the sorting is based on their
* current success probability. Higher success probability is preferred among
* MCS groups; CCK rates do not support aggregation and are therefore placed
* last.
*/
static void
minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u16 index,
u16 *tp_list)
{
int cur_group, cur_idx, cur_tp_avg, cur_prob;
int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
int j = MAX_THR_RATES;
cur_group = MI_RATE_GROUP(index);
cur_idx = MI_RATE_IDX(index);
cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
do {
tmp_group = MI_RATE_GROUP(tp_list[j - 1]);
tmp_idx = MI_RATE_IDX(tp_list[j - 1]);
tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
tmp_prob);
if (cur_tp_avg < tmp_tp_avg ||
(cur_tp_avg == tmp_tp_avg && cur_prob <= tmp_prob))
break;
j--;
} while (j > 0);
if (j < MAX_THR_RATES - 1) {
memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
(MAX_THR_RATES - (j + 1))));
}
if (j < MAX_THR_RATES)
tp_list[j] = index;
}
/*
* Find and set the topmost probability rate per sta and per group
*/
static void
minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 *dest, u16 index)
{
struct minstrel_mcs_group_data *mg;
struct minstrel_rate_stats *mrs;
int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
int max_tp_group, max_tp_idx, max_tp_prob;
int cur_tp_avg, cur_group, cur_idx;
int max_gpr_group, max_gpr_idx;
int max_gpr_tp_avg, max_gpr_prob;
cur_group = MI_RATE_GROUP(index);
cur_idx = MI_RATE_IDX(index);
mg = &mi->groups[cur_group];
mrs = &mg->rates[cur_idx];
tmp_group = MI_RATE_GROUP(*dest);
tmp_idx = MI_RATE_IDX(*dest);
tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
/* if max_tp_rate[0] is from an MCS group, max_prob_rate is selected from
* an MCS group as well; CCK_GROUP rates do not allow aggregation */
max_tp_group = MI_RATE_GROUP(mi->max_tp_rate[0]);
max_tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]);
max_tp_prob = mi->groups[max_tp_group].rates[max_tp_idx].prob_avg;
if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index)) &&
!minstrel_ht_is_legacy_group(max_tp_group))
return;
/* skip rates faster than max tp rate with lower prob */
if (minstrel_get_duration(mi->max_tp_rate[0]) > minstrel_get_duration(index) &&
mrs->prob_avg < max_tp_prob)
return;
max_gpr_group = MI_RATE_GROUP(mg->max_group_prob_rate);
max_gpr_idx = MI_RATE_IDX(mg->max_group_prob_rate);
max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
mrs->prob_avg);
if (cur_tp_avg > tmp_tp_avg)
*dest = index;
max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
max_gpr_idx,
max_gpr_prob);
if (cur_tp_avg > max_gpr_tp_avg)
mg->max_group_prob_rate = index;
} else {
if (mrs->prob_avg > tmp_prob)
*dest = index;
if (mrs->prob_avg > max_gpr_prob)
mg->max_group_prob_rate = index;
}
}
/*
* Assign the new rate set per sta and use CCK rates only if the fastest
* rate (max_tp_rate[0]) is from the CCK group. This prevents sorted rate
* sets that mix MCS and CCK rates, because CCK rates cannot use
* aggregation.
*/
static void
minstrel_ht_assign_best_tp_rates(struct minstrel_ht_sta *mi,
u16 tmp_mcs_tp_rate[MAX_THR_RATES],
u16 tmp_legacy_tp_rate[MAX_THR_RATES])
{
unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob;
int i;
tmp_group = MI_RATE_GROUP(tmp_legacy_tp_rate[0]);
tmp_idx = MI_RATE_IDX(tmp_legacy_tp_rate[0]);
tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
tmp_group = MI_RATE_GROUP(tmp_mcs_tp_rate[0]);
tmp_idx = MI_RATE_IDX(tmp_mcs_tp_rate[0]);
tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
if (tmp_cck_tp > tmp_mcs_tp) {
for (i = 0; i < MAX_THR_RATES; i++) {
minstrel_ht_sort_best_tp_rates(mi, tmp_legacy_tp_rate[i],
tmp_mcs_tp_rate);
}
}
}
/*
* Try to increase the robustness of the max_prob rate by decreasing the
* number of streams if possible.
*/
static inline void
minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
int tmp_max_streams, group, tmp_idx, tmp_prob;
int tmp_tp = 0;
if (!mi->sta->deflink.ht_cap.ht_supported)
return;
group = MI_RATE_GROUP(mi->max_tp_rate[0]);
tmp_max_streams = minstrel_mcs_groups[group].streams;
for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
mg = &mi->groups[group];
if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
continue;
tmp_idx = MI_RATE_IDX(mg->max_group_prob_rate);
tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
(minstrel_mcs_groups[group].streams < tmp_max_streams)) {
mi->max_prob_rate = mg->max_group_prob_rate;
tmp_tp = minstrel_ht_get_tp_avg(mi, group,
tmp_idx,
tmp_prob);
}
}
}
static u16
__minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi,
enum minstrel_sample_type type)
{
u16 *rates = mi->sample[type].sample_rates;
u16 cur;
int i;
for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) {
if (!rates[i])
continue;
cur = rates[i];
rates[i] = 0;
return cur;
}
return 0;
}
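/*
 * Exponentially weighted moving average, scaled by EWMA_DIV:
 * new_avg = old + (EWMA_DIV - weight) * (new - old) / EWMA_DIV
 */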
static inline int
minstrel_ewma(int old, int new, int weight)
{
int diff, incr;
diff = new - old;
incr = (EWMA_DIV - weight) * diff / EWMA_DIV;
return old + incr;
}
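/*
 * Second order IIR low-pass filter for the per-interval success
 * probability: the new output is a weighted sum (MINSTREL_AVG_COEFF1..3)
 * of the input sample and the two previous outputs, clamped to the range
 * (0, 1 << MINSTREL_SCALE]. The first sample seeds the filter directly.
 */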
static inline int minstrel_filter_avg_add(u16 *prev_1, u16 *prev_2, s32 in)
{
s32 out_1 = *prev_1;
s32 out_2 = *prev_2;
s32 val;
if (!in)
in += 1;
if (!out_1) {
val = out_1 = in;
goto out;
}
val = MINSTREL_AVG_COEFF1 * in;
val += MINSTREL_AVG_COEFF2 * out_1;
val += MINSTREL_AVG_COEFF3 * out_2;
val >>= MINSTREL_SCALE;
if (val > 1 << MINSTREL_SCALE)
val = 1 << MINSTREL_SCALE;
if (val < 0)
val = 1;
out:
*prev_2 = out_1;
*prev_1 = val;
return val;
}
/*
* Recalculate statistics and counters of a given rate
*/
static void
minstrel_ht_calc_rate_stats(struct minstrel_priv *mp,
struct minstrel_rate_stats *mrs)
{
unsigned int cur_prob;
if (unlikely(mrs->attempts > 0)) {
cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
minstrel_filter_avg_add(&mrs->prob_avg,
&mrs->prob_avg_1, cur_prob);
mrs->att_hist += mrs->attempts;
mrs->succ_hist += mrs->success;
}
mrs->last_success = mrs->success;
mrs->last_attempts = mrs->attempts;
mrs->success = 0;
mrs->attempts = 0;
}
static bool
minstrel_ht_find_sample_rate(struct minstrel_ht_sta *mi, int type, int idx)
{
int i;
for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) {
u16 cur = mi->sample[type].sample_rates[i];
if (cur == idx)
return true;
if (!cur)
break;
}
return false;
}
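/*
 * Drop queued sample rates that no longer fit their bucket's duration
 * window and compact the remaining entries to the front of the list.
 * Returns the number of entries kept.
 */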
static int
minstrel_ht_move_sample_rates(struct minstrel_ht_sta *mi, int type,
u32 fast_rate_dur, u32 slow_rate_dur)
{
u16 *rates = mi->sample[type].sample_rates;
int i, j;
for (i = 0, j = 0; i < MINSTREL_SAMPLE_RATES; i++) {
u32 duration;
bool valid = false;
u16 cur;
cur = rates[i];
if (!cur)
continue;
duration = minstrel_get_duration(cur);
switch (type) {
case MINSTREL_SAMPLE_TYPE_SLOW:
valid = duration > fast_rate_dur &&
duration < slow_rate_dur;
break;
case MINSTREL_SAMPLE_TYPE_INC:
case MINSTREL_SAMPLE_TYPE_JUMP:
valid = duration < fast_rate_dur;
break;
default:
valid = false;
break;
}
if (!valid) {
rates[i] = 0;
continue;
}
if (i == j)
continue;
rates[j++] = cur;
rates[i] = 0;
}
return j;
}
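/*
 * Return the offset of the first supported rate in a group that is faster
 * than max_duration, or -1 if the group contains no such rate.
 */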
static int
minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group,
u32 max_duration)
{
u16 supported = mi->supported[group];
int i;
for (i = 0; i < MCS_GROUP_RATES && supported; i++, supported >>= 1) {
if (!(supported & BIT(0)))
continue;
if (minstrel_get_duration(MI_RATE(group, i)) >= max_duration)
continue;
return i;
}
return -1;
}
/*
* Incremental update rates:
* Flip through groups and pick the first group rate that is faster than the
* highest currently selected rate
*/
static u16
minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
{
u8 type = MINSTREL_SAMPLE_TYPE_INC;
int i, index = 0;
u8 group;
group = mi->sample[type].sample_group;
for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
index = minstrel_ht_group_min_rate_offset(mi, group,
fast_rate_dur);
if (index < 0)
continue;
index = MI_RATE(group, index & 0xf);
if (!minstrel_ht_find_sample_rate(mi, type, index))
goto out;
}
index = 0;
out:
mi->sample[type].sample_group = group;
return index;
}
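/*
 * Pick the next sample rate from a group by walking its randomized
 * sample_table column, skipping entries below the minimum offset or not
 * supported by the sta. Returns -1 if no usable rate is found.
 */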
static int
minstrel_ht_next_group_sample_rate(struct minstrel_ht_sta *mi, int group,
u16 supported, int offset)
{
struct minstrel_mcs_group_data *mg = &mi->groups[group];
u16 idx;
int i;
for (i = 0; i < MCS_GROUP_RATES; i++) {
idx = sample_table[mg->column][mg->index];
if (++mg->index >= MCS_GROUP_RATES) {
mg->index = 0;
if (++mg->column >= ARRAY_SIZE(sample_table))
mg->column = 0;
}
if (idx < offset)
continue;
if (!(supported & BIT(idx)))
continue;
return MI_RATE(group, idx);
}
return -1;
}
/*
* Jump rates:
* Sample random rates and use those that are faster than the highest
* currently selected rate. Rates between the fastest and the slowest
* get sorted into the slow sample bucket, but only if it has room.
*/
static u16
minstrel_ht_next_jump_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur,
u32 slow_rate_dur, int *slow_rate_ofs)
{
struct minstrel_rate_stats *mrs;
u32 max_duration = slow_rate_dur;
int i, index, offset;
u16 *slow_rates;
u16 supported;
u32 duration;
u8 group;
if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
max_duration = fast_rate_dur;
slow_rates = mi->sample[MINSTREL_SAMPLE_TYPE_SLOW].sample_rates;
group = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group;
for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
u8 type;
group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
supported = mi->supported[group];
if (!supported)
continue;
offset = minstrel_ht_group_min_rate_offset(mi, group,
max_duration);
if (offset < 0)
continue;
index = minstrel_ht_next_group_sample_rate(mi, group, supported,
offset);
if (index < 0)
continue;
duration = minstrel_get_duration(index);
if (duration < fast_rate_dur)
type = MINSTREL_SAMPLE_TYPE_JUMP;
else
type = MINSTREL_SAMPLE_TYPE_SLOW;
if (minstrel_ht_find_sample_rate(mi, type, index))
continue;
if (type == MINSTREL_SAMPLE_TYPE_JUMP)
goto found;
if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
continue;
if (duration >= slow_rate_dur)
continue;
/* skip slow rates with high success probability */
mrs = minstrel_get_ratestats(mi, index);
if (mrs->prob_avg > MINSTREL_FRAC(95, 100))
continue;
slow_rates[(*slow_rate_ofs)++] = index;
if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
max_duration = fast_rate_dur;
}
index = 0;
found:
mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group = group;
return index;
}
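
/*
 * Refill the sample rate tables: compact the existing entries, then
 * top up the INC and JUMP lists with fresh candidates (JUMP sampling
 * also feeds the SLOW list) and snapshot the result into
 * cur_sample_rates.
 */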
static void
minstrel_ht_refill_sample_rates(struct minstrel_ht_sta *mi)
{
u32 prob_dur = minstrel_get_duration(mi->max_prob_rate);
u32 tp_dur = minstrel_get_duration(mi->max_tp_rate[0]);
u32 tp2_dur = minstrel_get_duration(mi->max_tp_rate[1]);
u32 fast_rate_dur = min(min(tp_dur, tp2_dur), prob_dur);
u32 slow_rate_dur = max(max(tp_dur, tp2_dur), prob_dur);
u16 *rates;
int i, j;
rates = mi->sample[MINSTREL_SAMPLE_TYPE_INC].sample_rates;
i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_INC,
fast_rate_dur, slow_rate_dur);
while (i < MINSTREL_SAMPLE_RATES) {
rates[i] = minstrel_ht_next_inc_rate(mi, tp_dur);
if (!rates[i])
break;
i++;
}
rates = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_rates;
i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_JUMP,
fast_rate_dur, slow_rate_dur);
j = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_SLOW,
fast_rate_dur, slow_rate_dur);
while (i < MINSTREL_SAMPLE_RATES) {
rates[i] = minstrel_ht_next_jump_rate(mi, fast_rate_dur,
slow_rate_dur, &j);
if (!rates[i])
break;
i++;
}
for (i = 0; i < ARRAY_SIZE(mi->sample); i++)
memcpy(mi->sample[i].cur_sample_rates, mi->sample[i].sample_rates,
sizeof(mi->sample[i].cur_sample_rates));
}

/*
* Update rate statistics and select new primary rates
*
* Rules for rate selection:
* - max_prob_rate must use only one stream, as a tradeoff between delivery
* probability and throughput during strong fluctuations
* - as long as the max prob rate has a probability of more than 75%, pick
 *   higher throughput rates, even if the probability is a bit lower
*/
static void
minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct minstrel_mcs_group_data *mg;
struct minstrel_rate_stats *mrs;
int group, i, j, cur_prob;
u16 tmp_mcs_tp_rate[MAX_THR_RATES], tmp_group_tp_rate[MAX_THR_RATES];
u16 tmp_legacy_tp_rate[MAX_THR_RATES], tmp_max_prob_rate;
u16 index;
bool ht_supported = mi->sta->deflink.ht_cap.ht_supported;
if (mi->ampdu_packets > 0) {
if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN))
mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets),
EWMA_LEVEL);
else
mi->avg_ampdu_len = 0;
mi->ampdu_len = 0;
mi->ampdu_packets = 0;
}
if (mi->supported[MINSTREL_CCK_GROUP])
group = MINSTREL_CCK_GROUP;
else if (mi->supported[MINSTREL_OFDM_GROUP])
group = MINSTREL_OFDM_GROUP;
else
group = 0;
index = MI_RATE(group, 0);
for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++)
tmp_legacy_tp_rate[j] = index;
if (mi->supported[MINSTREL_VHT_GROUP_0])
group = MINSTREL_VHT_GROUP_0;
else if (ht_supported)
group = MINSTREL_HT_GROUP_0;
else if (mi->supported[MINSTREL_CCK_GROUP])
group = MINSTREL_CCK_GROUP;
else
group = MINSTREL_OFDM_GROUP;
index = MI_RATE(group, 0);
tmp_max_prob_rate = index;
for (j = 0; j < ARRAY_SIZE(tmp_mcs_tp_rate); j++)
tmp_mcs_tp_rate[j] = index;
/* Find best rate sets within all MCS groups */
for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
u16 *tp_rate = tmp_mcs_tp_rate;
u16 last_prob = 0;
mg = &mi->groups[group];
if (!mi->supported[group])
continue;
/* (re)Initialize group rate indexes */
for (j = 0; j < MAX_THR_RATES; j++)
tmp_group_tp_rate[j] = MI_RATE(group, 0);
if (group == MINSTREL_CCK_GROUP && ht_supported)
tp_rate = tmp_legacy_tp_rate;
for (i = MCS_GROUP_RATES - 1; i >= 0; i--) {
if (!(mi->supported[group] & BIT(i)))
continue;
index = MI_RATE(group, i);
mrs = &mg->rates[i];
mrs->retry_updated = false;
minstrel_ht_calc_rate_stats(mp, mrs);
if (mrs->att_hist)
last_prob = max(last_prob, mrs->prob_avg);
else
mrs->prob_avg = max(last_prob, mrs->prob_avg);
cur_prob = mrs->prob_avg;
if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
continue;
/* Find max throughput rate set */
minstrel_ht_sort_best_tp_rates(mi, index, tp_rate);
/* Find max throughput rate set within a group */
minstrel_ht_sort_best_tp_rates(mi, index,
tmp_group_tp_rate);
}
memcpy(mg->max_group_tp_rate, tmp_group_tp_rate,
sizeof(mg->max_group_tp_rate));
}
/* Assign new rate set per sta */
minstrel_ht_assign_best_tp_rates(mi, tmp_mcs_tp_rate,
tmp_legacy_tp_rate);
memcpy(mi->max_tp_rate, tmp_mcs_tp_rate, sizeof(mi->max_tp_rate));
for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
if (!mi->supported[group])
continue;
mg = &mi->groups[group];
mg->max_group_prob_rate = MI_RATE(group, 0);
for (i = 0; i < MCS_GROUP_RATES; i++) {
if (!(mi->supported[group] & BIT(i)))
continue;
index = MI_RATE(group, i);
/* Find max probability rate per group and global */
minstrel_ht_set_best_prob_rate(mi, &tmp_max_prob_rate,
index);
}
}
mi->max_prob_rate = tmp_max_prob_rate;
/* Try to increase robustness of max_prob_rate */
minstrel_ht_prob_rate_reduce_streams(mi);
minstrel_ht_refill_sample_rates(mi);
#ifdef CONFIG_MAC80211_DEBUGFS
/* use fixed index if set */
if (mp->fixed_rate_idx != -1) {
for (i = 0; i < 4; i++)
mi->max_tp_rate[i] = mp->fixed_rate_idx;
mi->max_prob_rate = mp->fixed_rate_idx;
}
#endif
/* Reset update timer */
mi->last_stats_update = jiffies;
mi->sample_time = jiffies;
}
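
/*
 * Check whether the tx status rate entry refers to a rate that is
 * actually handled by this algorithm (MCS/VHT, or one of the known
 * legacy CCK/OFDM rates).
 */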
static bool
minstrel_ht_txstat_valid(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_tx_rate *rate)
{
int i;
if (rate->idx < 0)
return false;
if (!rate->count)
return false;
if (rate->flags & IEEE80211_TX_RC_MCS ||
rate->flags & IEEE80211_TX_RC_VHT_MCS)
return true;
for (i = 0; i < ARRAY_SIZE(mp->cck_rates); i++)
if (rate->idx == mp->cck_rates[i])
return true;
for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates[0]); i++)
if (rate->idx == mp->ofdm_rates[mi->band][i])
return true;
return false;
}

/*
* Check whether rate_status contains valid information.
*/
static bool
minstrel_ht_ri_txstat_valid(struct minstrel_priv *mp,
struct minstrel_ht_sta *mi,
struct ieee80211_rate_status *rate_status)
{
int i;
if (!rate_status)
return false;
if (!rate_status->try_count)
return false;
if (rate_status->rate_idx.flags & RATE_INFO_FLAGS_MCS ||
rate_status->rate_idx.flags & RATE_INFO_FLAGS_VHT_MCS)
return true;
for (i = 0; i < ARRAY_SIZE(mp->cck_rates); i++) {
if (rate_status->rate_idx.legacy ==
minstrel_cck_bitrates[mp->cck_rates[i]])
return true;
}
for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates); i++) {
if (rate_status->rate_idx.legacy ==
minstrel_ofdm_bitrates[mp->ofdm_rates[mi->band][i]])
return true;
}
return false;
}
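
/*
 * Replace the given rate with the best throughput rate of a lower
 * group that uses at most the same number of streams, as a fallback
 * after repeated transmission failures.
 */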
static void
minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
{
int group, orig_group;
orig_group = group = MI_RATE_GROUP(*idx);
while (group > 0) {
group--;
if (!mi->supported[group])
continue;
if (minstrel_mcs_groups[group].streams >
minstrel_mcs_groups[orig_group].streams)
continue;
if (primary)
*idx = mi->groups[group].max_group_tp_rate[0];
else
*idx = mi->groups[group].max_group_tp_rate[1];
break;
}
}
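
/*
 * Process tx status feedback: update the per-rate success/attempt
 * counters, downgrade the max throughput rates if spatial multiplexing
 * suddenly fails, and run a stats update once the update interval has
 * elapsed.
 */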
static void
minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
void *priv_sta, struct ieee80211_tx_status *st)
{
struct ieee80211_tx_info *info = st->info;
struct minstrel_ht_sta *mi = priv_sta;
struct ieee80211_tx_rate *ar = info->status.rates;
struct minstrel_rate_stats *rate, *rate2;
struct minstrel_priv *mp = priv;
u32 update_interval = mp->update_interval;
bool last, update = false;
int i;
/* Ignore packet that was sent with noAck flag */
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
return;
/* This packet was aggregated but doesn't carry status info */
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
!(info->flags & IEEE80211_TX_STAT_AMPDU))
return;
if (!(info->flags & IEEE80211_TX_STAT_AMPDU)) {
info->status.ampdu_ack_len =
(info->flags & IEEE80211_TX_STAT_ACK ? 1 : 0);
info->status.ampdu_len = 1;
}
/* wraparound */
if (mi->total_packets >= ~0 - info->status.ampdu_len) {
mi->total_packets = 0;
mi->sample_packets = 0;
}
mi->total_packets += info->status.ampdu_len;
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
mi->sample_packets += info->status.ampdu_len;
mi->ampdu_packets++;
mi->ampdu_len += info->status.ampdu_len;
if (st->rates && st->n_rates) {
last = !minstrel_ht_ri_txstat_valid(mp, mi, &(st->rates[0]));
for (i = 0; !last; i++) {
last = (i == st->n_rates - 1) ||
!minstrel_ht_ri_txstat_valid(mp, mi,
&(st->rates[i + 1]));
rate = minstrel_ht_ri_get_stats(mp, mi,
&(st->rates[i]));
if (last)
rate->success += info->status.ampdu_ack_len;
rate->attempts += st->rates[i].try_count *
info->status.ampdu_len;
}
} else {
last = !minstrel_ht_txstat_valid(mp, mi, &ar[0]);
for (i = 0; !last; i++) {
last = (i == IEEE80211_TX_MAX_RATES - 1) ||
!minstrel_ht_txstat_valid(mp, mi, &ar[i + 1]);
rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
if (last)
rate->success += info->status.ampdu_ack_len;
rate->attempts += ar[i].count * info->status.ampdu_len;
}
}
if (mp->hw->max_rates > 1) {
/*
* check for sudden death of spatial multiplexing,
* downgrade to a lower number of streams if necessary.
*/
rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
if (rate->attempts > 30 &&
rate->success < rate->attempts / 4) {
minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
update = true;
}
rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
if (rate2->attempts > 30 &&
rate2->success < rate2->attempts / 4) {
minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
update = true;
}
}
if (time_after(jiffies, mi->last_stats_update + update_interval)) {
update = true;
minstrel_ht_update_stats(mp, mi);
}
if (update)
minstrel_ht_update_rates(mp, mi);
}
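
/*
 * Calculate the retry limits for a rate, based on how many
 * transmission attempts (including contention time) fit into the
 * configured tx segment size.
 */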
static void
minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
int index)
{
struct minstrel_rate_stats *mrs;
unsigned int tx_time, tx_time_rtscts, tx_time_data;
unsigned int cw = mp->cw_min;
unsigned int ctime = 0;
unsigned int t_slot = 9; /* FIXME */
unsigned int ampdu_len = minstrel_ht_avg_ampdu_len(mi);
unsigned int overhead = 0, overhead_rtscts = 0;
mrs = minstrel_get_ratestats(mi, index);
if (mrs->prob_avg < MINSTREL_FRAC(1, 10)) {
mrs->retry_count = 1;
mrs->retry_count_rtscts = 1;
return;
}
mrs->retry_count = 2;
mrs->retry_count_rtscts = 2;
mrs->retry_updated = true;
tx_time_data = minstrel_get_duration(index) * ampdu_len / 1000;
/* Contention time for first 2 tries */
ctime = (t_slot * cw) >> 1;
cw = min((cw << 1) | 1, mp->cw_max);
ctime += (t_slot * cw) >> 1;
cw = min((cw << 1) | 1, mp->cw_max);
if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index))) {
overhead = mi->overhead_legacy;
overhead_rtscts = mi->overhead_legacy_rtscts;
} else {
overhead = mi->overhead;
overhead_rtscts = mi->overhead_rtscts;
}
/* Total TX time for data and Contention after first 2 tries */
tx_time = ctime + 2 * (overhead + tx_time_data);
tx_time_rtscts = ctime + 2 * (overhead_rtscts + tx_time_data);
/* See how many more tries we can fit inside segment size */
do {
/* Contention time for this try */
ctime = (t_slot * cw) >> 1;
cw = min((cw << 1) | 1, mp->cw_max);
/* Total TX time after this try */
tx_time += ctime + overhead + tx_time_data;
tx_time_rtscts += ctime + overhead_rtscts + tx_time_data;
if (tx_time_rtscts < mp->segment_size)
mrs->retry_count_rtscts++;
} while ((tx_time < mp->segment_size) &&
(++mrs->retry_count < mp->max_retry));
}
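
/*
 * Fill one rate table entry: translate the minstrel rate index into
 * the hardware rate index, set the retry counts and enable RTS/CTS
 * for fallback rates and for dynamic SMPS with more than one stream.
 */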
static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_sta_rates *ratetbl, int offset, int index)
{
int group_idx = MI_RATE_GROUP(index);
const struct mcs_group *group = &minstrel_mcs_groups[group_idx];
struct minstrel_rate_stats *mrs;
u8 idx;
u16 flags = group->flags;
mrs = minstrel_get_ratestats(mi, index);
if (!mrs->retry_updated)
minstrel_calc_retransmit(mp, mi, index);
if (mrs->prob_avg < MINSTREL_FRAC(20, 100) || !mrs->retry_count) {
ratetbl->rate[offset].count = 2;
ratetbl->rate[offset].count_rts = 2;
ratetbl->rate[offset].count_cts = 2;
} else {
ratetbl->rate[offset].count = mrs->retry_count;
ratetbl->rate[offset].count_cts = mrs->retry_count;
ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts;
}
index = MI_RATE_IDX(index);
if (group_idx == MINSTREL_CCK_GROUP)
idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
else if (group_idx == MINSTREL_OFDM_GROUP)
idx = mp->ofdm_rates[mi->band][index %
ARRAY_SIZE(mp->ofdm_rates[0])];
else if (flags & IEEE80211_TX_RC_VHT_MCS)
idx = ((group->streams - 1) << 4) |
(index & 0xF);
else
idx = index + (group->streams - 1) * 8;
/* enable RTS/CTS if needed:
* - if station is in dynamic SMPS (and streams > 1)
* - for fallback rates, to increase chances of getting through
*/
if (offset > 0 ||
(mi->sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC &&
group->streams > 1)) {
ratetbl->rate[offset].count = ratetbl->rate[offset].count_rts;
flags |= IEEE80211_TX_RC_USE_RTS_CTS;
}
ratetbl->rate[offset].idx = idx;
ratetbl->rate[offset].flags = flags;
}

static inline int
minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
{
int group = MI_RATE_GROUP(rate);
rate = MI_RATE_IDX(rate);
return mi->groups[group].rates[rate].prob_avg;
}
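
/*
 * Derive the A-MSDU length limit from the current max_prob_rate:
 * slow or unreliable rates get a smaller limit, see the thresholds
 * below.
 */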
static int
minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
{
int group = MI_RATE_GROUP(mi->max_prob_rate);
const struct mcs_group *g = &minstrel_mcs_groups[group];
int rate = MI_RATE_IDX(mi->max_prob_rate);
unsigned int duration;
/* Disable A-MSDU if max_prob_rate is bad */
if (mi->groups[group].rates[rate].prob_avg < MINSTREL_FRAC(50, 100))
return 1;
duration = g->duration[rate];
duration <<= g->shift;
/* If the rate is slower than single-stream MCS1, make A-MSDU limit small */
if (duration > MCS_DURATION(1, 0, 52))
return 500;
/*
* If the rate is slower than single-stream MCS4, limit A-MSDU to usual
* data packet size
*/
if (duration > MCS_DURATION(1, 0, 104))
return 1600;
/*
* If the rate is slower than single-stream MCS7, or if the max throughput
* rate success probability is less than 75%, limit A-MSDU to twice the usual
* data packet size
*/
if (duration > MCS_DURATION(1, 0, 260) ||
(minstrel_ht_get_prob_avg(mi, mi->max_tp_rate[0]) <
MINSTREL_FRAC(75, 100)))
return 3200;
/*
* HT A-MPDU limits maximum MPDU size under BA agreement to 4095 bytes.
* Since aggregation sessions are started/stopped without txq flush, use
* the limit here to avoid the complexity of having to de-aggregate
* packets in the queue.
*/
if (!mi->sta->deflink.vht_cap.vht_supported)
return IEEE80211_MAX_MPDU_LEN_HT_BA;
/* unlimited */
return 0;
}
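
/*
 * Publish a new rate table to mac80211, filled with the max
 * throughput rates and one slot reserved for the max probability
 * rate.
 */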
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
struct ieee80211_sta_rates *rates;
int i = 0;
int max_rates = min_t(int, mp->hw->max_rates, IEEE80211_TX_RATE_TABLE_SIZE);
rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
if (!rates)
return;
/* Start with max_tp_rate[0] */
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
/* Fill up remaining, keep one entry for max_prob_rate */
for (; i < (max_rates - 1); i++)
minstrel_ht_set_rate(mp, mi, rates, i, mi->max_tp_rate[i]);
if (i < max_rates)
minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_prob_rate);
if (i < IEEE80211_TX_RATE_TABLE_SIZE)
rates->rate[i].idx = -1;
mi->sta->deflink.agg.max_rc_amsdu_len = minstrel_ht_get_max_amsdu_len(mi);
ieee80211_sta_recalc_aggregates(mi->sta);
rate_control_set_rates(mp->hw, mi->sta, rates);
}
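
/*
 * Select the sample type for the next probe: cycle through the
 * INC/JUMP/SLOW sequence on multi-rate-retry hardware, use only
 * incremental sampling otherwise.
 */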
static u16
minstrel_ht_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
{
u8 seq;
if (mp->hw->max_rates > 1) {
seq = mi->sample_seq;
mi->sample_seq = (seq + 1) % ARRAY_SIZE(minstrel_sample_seq);
seq = minstrel_sample_seq[seq];
} else {
seq = MINSTREL_SAMPLE_TYPE_INC;
}
return __minstrel_ht_get_sample_rate(mi, seq);
}
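
/*
 * Per-packet rate selection hook: normally the rates come from the
 * precomputed rate table, this only overrides the first entry with a
 * sampling rate when it is time to probe.
 */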
static void
minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
{
const struct mcs_group *sample_group;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
struct ieee80211_tx_rate *rate = &info->status.rates[0];
struct minstrel_ht_sta *mi = priv_sta;
struct minstrel_priv *mp = priv;
u16 sample_idx;
info->flags |= mi->tx_flags;
#ifdef CONFIG_MAC80211_DEBUGFS
if (mp->fixed_rate_idx != -1)
return;
#endif
/* Don't use EAPOL frames for sampling on non-mrr hw */
if (mp->hw->max_rates == 1 &&
(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
return;
if (time_is_after_jiffies(mi->sample_time))
return;
mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
sample_idx = minstrel_ht_get_sample_rate(mp, mi);
if (!sample_idx)
return;
sample_group = &minstrel_mcs_groups[MI_RATE_GROUP(sample_idx)];
sample_idx = MI_RATE_IDX(sample_idx);
if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] &&
(sample_idx >= 4) != txrc->short_preamble)
return;
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
rate->count = 1;
if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP]) {
int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
rate->idx = mp->cck_rates[idx];
} else if (sample_group == &minstrel_mcs_groups[MINSTREL_OFDM_GROUP]) {
int idx = sample_idx % ARRAY_SIZE(mp->ofdm_rates[0]);
rate->idx = mp->ofdm_rates[mi->band][idx];
} else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
ieee80211_rate_set_vht(rate, MI_RATE_IDX(sample_idx),
sample_group->streams);
} else {
rate->idx = sample_idx + (sample_group->streams - 1) * 8;
}
rate->flags = sample_group->flags;
}
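
/*
 * Mark the CCK rates (and their short preamble variants) supported by
 * a station on the 2.4 GHz band.
 */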
static void
minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta)
{
int i;
if (sband->band != NL80211_BAND_2GHZ)
return;
if (sta->deflink.ht_cap.ht_supported &&
!ieee80211_hw_check(mp->hw, SUPPORTS_HT_CCK_RATES))
return;
for (i = 0; i < 4; i++) {
if (mp->cck_rates[i] == 0xff ||
!rate_supported(sta, sband->band, mp->cck_rates[i]))
continue;
mi->supported[MINSTREL_CCK_GROUP] |= BIT(i);
if (sband->bitrates[i].flags & IEEE80211_RATE_SHORT_PREAMBLE)
mi->supported[MINSTREL_CCK_GROUP] |= BIT(i + 4);
}
}
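/*
 * Enable legacy OFDM rates for stations without HT support. HT (and
 * VHT) capable peers are served by the MCS groups, so their OFDM group
 * is left empty.
 */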
static void
minstrel_ht_update_ofdm(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta)
{
const u8 *rates;
int i;
if (sta->deflink.ht_cap.ht_supported)
return;
rates = mp->ofdm_rates[sband->band];
for (i = 0; i < ARRAY_SIZE(mp->ofdm_rates[0]); i++) {
if (rates[i] == 0xff ||
!rate_supported(sta, sband->band, rates[i]))
continue;
mi->supported[MINSTREL_OFDM_GROUP] |= BIT(i);
}
}
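/*
 * (Re)initialize the per-station state from the peer's HT/VHT
 * capabilities: frame and ACK overhead estimates, STBC/LDPC TX flags
 * and the per-group bitmaps of usable rates.
 */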
static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_priv *mp = priv;
struct minstrel_ht_sta *mi = priv_sta;
struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
u16 ht_cap = sta->deflink.ht_cap.cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
const struct ieee80211_rate *ctl_rate;
struct sta_info *sta_info;
bool ldpc, erp;
int use_vht;
int n_supported = 0;
int ack_dur;
int stbc;
int i;
BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != MINSTREL_GROUPS_NB);
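	/* use VHT only if the peer advertises at least one usable VHT TX MCS */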
if (vht_cap->vht_supported)
use_vht = vht_cap->vht_mcs.tx_mcs_map != cpu_to_le16(~0);
else
use_vht = 0;
memset(mi, 0, sizeof(*mi));
mi->sta = sta;
mi->band = sband->band;
mi->last_stats_update = jiffies;
ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
mi->overhead += ack_dur;
mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
ctl_rate = &sband->bitrates[rate_lowest_index(sband, sta)];
erp = ctl_rate->flags & IEEE80211_RATE_ERP_G;
ack_dur = ieee80211_frame_duration(sband->band, 10,
ctl_rate->bitrate, erp, 1,
ieee80211_chandef_get_shift(chandef));
mi->overhead_legacy = ack_dur;
mi->overhead_legacy_rtscts = mi->overhead_legacy + 2 * ack_dur;
mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
if (!use_vht) {
stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >>
IEEE80211_HT_CAP_RX_STBC_SHIFT;
ldpc = ht_cap & IEEE80211_HT_CAP_LDPC_CODING;
} else {
stbc = (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK) >>
IEEE80211_VHT_CAP_RXSTBC_SHIFT;
ldpc = vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC;
}
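	/* reflect the peer's RX STBC/LDPC capabilities in our TX flags */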
mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
if (ldpc)
mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
u32 gflags = minstrel_mcs_groups[i].flags;
int bw, nss;
mi->supported[i] = 0;
if (minstrel_ht_is_legacy_group(i))
continue;
if (gflags & IEEE80211_TX_RC_SHORT_GI) {
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
if (!(ht_cap & IEEE80211_HT_CAP_SGI_40))
continue;
} else {
if (!(ht_cap & IEEE80211_HT_CAP_SGI_20))
continue;
}
}
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH &&
sta->deflink.bandwidth < IEEE80211_STA_RX_BW_40)
continue;
nss = minstrel_mcs_groups[i].streams;
/* Mark MCS > 7 as unsupported if STA is in static SMPS mode */
if (sta->deflink.smps_mode == IEEE80211_SMPS_STATIC && nss > 1)
continue;
/* HT rate */
if (gflags & IEEE80211_TX_RC_MCS) {
if (use_vht && minstrel_vht_only)
continue;
mi->supported[i] = mcs->rx_mask[nss - 1];
if (mi->supported[i])
n_supported++;
continue;
}
/* VHT rate */
if (!vht_cap->vht_supported ||
WARN_ON(!(gflags & IEEE80211_TX_RC_VHT_MCS)) ||
WARN_ON(gflags & IEEE80211_TX_RC_160_MHZ_WIDTH))
continue;
if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH) {
if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_80 ||
((gflags & IEEE80211_TX_RC_SHORT_GI) &&
!(vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80))) {
continue;
}
}
if (gflags & IEEE80211_TX_RC_40_MHZ_WIDTH)
bw = BW_40;
else if (gflags & IEEE80211_TX_RC_80_MHZ_WIDTH)
bw = BW_80;
else
bw = BW_20;
mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss,
vht_cap->vht_mcs.tx_mcs_map);
if (mi->supported[i])
n_supported++;
}
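	/* short preamble is used only if both the station and the BSS enable it */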
sta_info = container_of(sta, struct sta_info, sta);
mi->use_short_preamble = test_sta_flag(sta_info, WLAN_STA_SHORT_PREAMBLE) &&
sta_info->sdata->vif.bss_conf.use_short_preamble;
minstrel_ht_update_cck(mp, mi, sband, sta);
minstrel_ht_update_ofdm(mp, mi, sband, sta);
/* create an initial rate table with the lowest supported rates */
minstrel_ht_update_stats(mp, mi);
minstrel_ht_update_rates(mp, mi);
}
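/* rate_init and rate_update both simply rebuild the station state */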
static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed)
{
minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
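/*
 * Note: the band scan below computes the largest legacy bitrate count,
 * but the result is currently unused; it appears to be a leftover from
 * the pre-HT minstrel code path.
 */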
static void *
minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
{
struct ieee80211_supported_band *sband;
struct minstrel_ht_sta *mi;
struct minstrel_priv *mp = priv;
struct ieee80211_hw *hw = mp->hw;
int max_rates = 0;
int i;
for (i = 0; i < NUM_NL80211_BANDS; i++) {
sband = hw->wiphy->bands[i];
if (sband && sband->n_bitrates > max_rates)
max_rates = sband->n_bitrates;
}
return kzalloc(sizeof(*mi), gfp);
}
static void
minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
{
kfree(priv_sta);
}
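/*
 * Map a list of bitrates (in units of 100 kbit/s) to their indexes in
 * the band's bitrate table. Slots with no match are left untouched;
 * the callers pre-fill dest with 0xff to mark them invalid.
 */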
static void
minstrel_ht_fill_rate_array(u8 *dest, struct ieee80211_supported_band *sband,
const s16 *bitrates, int n_rates, u32 rate_flags)
{
int i, j;
for (i = 0; i < sband->n_bitrates; i++) {
struct ieee80211_rate *rate = &sband->bitrates[i];
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
for (j = 0; j < n_rates; j++) {
if (rate->bitrate != bitrates[j])
continue;
dest[j] = i;
break;
}
}
}
static void
minstrel_ht_init_cck_rates(struct minstrel_priv *mp)
{
static const s16 bitrates[4] = { 10, 20, 55, 110 };
struct ieee80211_supported_band *sband;
u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
memset(mp->cck_rates, 0xff, sizeof(mp->cck_rates));
sband = mp->hw->wiphy->bands[NL80211_BAND_2GHZ];
if (!sband)
return;
BUILD_BUG_ON(ARRAY_SIZE(mp->cck_rates) != ARRAY_SIZE(bitrates));
	minstrel_ht_fill_rate_array(mp->cck_rates, sband,
				    bitrates, ARRAY_SIZE(bitrates),
				    rate_flags);
}
static void
minstrel_ht_init_ofdm_rates(struct minstrel_priv *mp, enum nl80211_band band)
{
static const s16 bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 };
struct ieee80211_supported_band *sband;
u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
memset(mp->ofdm_rates[band], 0xff, sizeof(mp->ofdm_rates[band]));
sband = mp->hw->wiphy->bands[band];
if (!sband)
return;
BUILD_BUG_ON(ARRAY_SIZE(mp->ofdm_rates[band]) != ARRAY_SIZE(bitrates));
	minstrel_ht_fill_rate_array(mp->ofdm_rates[band], sband,
				    bitrates, ARRAY_SIZE(bitrates),
				    rate_flags);
}
static void *
minstrel_ht_alloc(struct ieee80211_hw *hw)
{
struct minstrel_priv *mp;
int i;
mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
if (!mp)
return NULL;
/* contention window settings
* Just an approximation. Using the per-queue values would complicate
* the calculations and is probably unnecessary */
mp->cw_min = 15;
mp->cw_max = 1023;
/* maximum time that the hw is allowed to stay in one MRR segment */
mp->segment_size = 6000;
if (hw->max_rate_tries > 0)
mp->max_retry = hw->max_rate_tries;
else
/* safe default, does not necessarily have to match hw properties */
mp->max_retry = 7;
if (hw->max_rates >= 4)
mp->has_mrr = true;
mp->hw = hw;
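	/* update rate statistics every 50 ms */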
mp->update_interval = HZ / 20;
minstrel_ht_init_cck_rates(mp);
for (i = 0; i < ARRAY_SIZE(mp->hw->wiphy->bands); i++)
minstrel_ht_init_ofdm_rates(mp, i);
return mp;
}
#ifdef CONFIG_MAC80211_DEBUGFS
static void minstrel_ht_add_debugfs(struct ieee80211_hw *hw, void *priv,
struct dentry *debugfsdir)
{
struct minstrel_priv *mp = priv;
mp->fixed_rate_idx = (u32) -1;
debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
&mp->fixed_rate_idx);
}
#endif
static void
minstrel_ht_free(void *priv)
{
kfree(priv);
}
static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
{
struct minstrel_ht_sta *mi = priv_sta;
int i, j, prob, tp_avg;
i = MI_RATE_GROUP(mi->max_tp_rate[0]);
j = MI_RATE_IDX(mi->max_tp_rate[0]);
prob = mi->groups[i].rates[j].prob_avg;
	/* convert tp_avg from packets per second to kbps */
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
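	/*
	 * Example: 1000 packets/s at AVG_PKT_SIZE = 1200 bytes gives
	 * 1000 * 1200 * 8 / 1024 ~= 9375 kbps.
	 */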
return tp_avg;
}
static const struct rate_control_ops mac80211_minstrel_ht = {
.name = "minstrel_ht",
.capa = RATE_CTRL_CAPA_AMPDU_TRIGGER,
.tx_status_ext = minstrel_ht_tx_status,
.get_rate = minstrel_ht_get_rate,
.rate_init = minstrel_ht_rate_init,
.rate_update = minstrel_ht_rate_update,
.alloc_sta = minstrel_ht_alloc_sta,
.free_sta = minstrel_ht_free_sta,
.alloc = minstrel_ht_alloc,
.free = minstrel_ht_free,
#ifdef CONFIG_MAC80211_DEBUGFS
.add_debugfs = minstrel_ht_add_debugfs,
.add_sta_debugfs = minstrel_ht_add_sta_debugfs,
#endif
.get_expected_throughput = minstrel_ht_get_expected_throughput,
};
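/*
 * Fill the sample table with SAMPLE_COLUMNS random permutations of the
 * MCS_GROUP_RATES per-group rate indexes, using a random offset plus
 * linear probing to resolve collisions.
 */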
static void __init init_sample_table(void)
{
int col, i, new_idx;
u8 rnd[MCS_GROUP_RATES];
memset(sample_table, 0xff, sizeof(sample_table));
for (col = 0; col < SAMPLE_COLUMNS; col++) {
get_random_bytes(rnd, sizeof(rnd));
for (i = 0; i < MCS_GROUP_RATES; i++) {
new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
while (sample_table[col][new_idx] != 0xff)
new_idx = (new_idx + 1) % MCS_GROUP_RATES;
sample_table[col][new_idx] = i;
}
}
}
int __init
rc80211_minstrel_init(void)
{
init_sample_table();
return ieee80211_rate_control_register(&mac80211_minstrel_ht);
}
void
rc80211_minstrel_exit(void)
{
ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
}