2019-06-04 16:11:33 +08:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-only */
|
2008-10-06 00:07:45 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef __RC_MINSTREL_H
|
|
|
|
#define __RC_MINSTREL_H
|
|
|
|
|
2013-04-17 19:43:22 +08:00
|
|
|
#define EWMA_LEVEL	96	/* ewma weighting factor [/EWMA_DIV] */
#define EWMA_DIV	128
#define SAMPLE_COLUMNS	10	/* number of columns in sample table */

/*
 * Scaled fraction values: fixed-point with MINSTREL_SCALE fractional bits.
 * MINSTREL_FRAC(val, div) converts val/div into the scaled representation;
 * MINSTREL_TRUNC(val) drops the fractional part again.
 *
 * FIX: 'div' is parenthesized in the expansion so that expression
 * arguments (e.g. MINSTREL_FRAC(a, b + c)) divide by the whole
 * expression rather than binding incorrectly.
 */
#define MINSTREL_SCALE  12
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)

/* number of highest throughput rates to consider */
#define MAX_THR_RATES 4

/*
 * Perform EWMA (Exponentially Weighted Moving Average) calculation
 *
 * @old:    previous average
 * @new:    new sample
 * @weight: weight of the old value, scaled by EWMA_DIV (e.g. EWMA_LEVEL);
 *          larger weight means slower adaptation to new samples
 *
 * Returns old + (EWMA_DIV - weight) * (new - old) / EWMA_DIV, i.e. the
 * old average moved a (EWMA_DIV - weight)/EWMA_DIV fraction of the way
 * toward the new sample. Integer division truncates toward zero.
 */
static inline int
minstrel_ewma(int old, int new, int weight)
{
	int diff, incr;

	diff = new - old;
	incr = (EWMA_DIV - weight) * diff / EWMA_DIV;

	return old + incr;
}
|
|
|
|
|
2014-09-10 05:22:13 +08:00
|
|
|
/*
 * Per-rate statistics tracked by minstrel for one transmit rate.
 */
struct minstrel_rate_stats {
	/* current / last sampling period attempts/success counters */
	u16 attempts, last_attempts;
	u16 success, last_success;

	/* total attempts/success counters */
	u32 att_hist, succ_hist;

	/* prob_ewma - exponential weighted moving average of prob */
	u16 prob_ewma;

	/* maximum retry counts */
	u8 retry_count;
	u8 retry_count_rtscts;	/* retry count when RTS/CTS is used */

	/* number of times sampling this rate was skipped — presumably
	 * used to force a sample eventually; verify against the .c file */
	u8 sample_skipped;
	/* true once the retry counts above have been (re)computed */
	bool retry_updated;
};
|
2013-03-05 06:30:01 +08:00
|
|
|
|
2008-10-06 00:07:45 +08:00
|
|
|
/*
 * One entry in a station's supported-rate table: static rate
 * parameters plus the statistics gathered for that rate.
 */
struct minstrel_rate {
	int bitrate;		/* rate value (units not shown in this header) */

	s8 rix;			/* rate index into the hardware rate table */
	u8 retry_count_cts;	/* retry count when CTS-to-self is used */
	u8 adjusted_retry_count;

	/* precomputed airtime values — presumably in microseconds;
	 * confirm against the initialization code */
	unsigned int perfect_tx_time;	/* tx time assuming no retries */
	unsigned int ack_time;		/* time to receive the ACK */

	int sample_limit;	/* remaining sample budget for this rate */

	/* success/attempt statistics for this rate */
	struct minstrel_rate_stats stats;
};
|
|
|
|
|
|
|
|
/*
 * Per-station minstrel state: the rate table, the currently selected
 * best rates and the sampling bookkeeping.
 */
struct minstrel_sta_info {
	struct ieee80211_sta *sta;	/* mac80211 station this state belongs to */

	unsigned long last_stats_update;	/* timestamp of last stats pass */
	unsigned int sp_ack_dur;	/* short-preamble ACK duration */
	unsigned int rate_avg;

	unsigned int lowest_rix;	/* most robust fallback rate index */

	/* best-rate selection: indices into r[] below */
	u8 max_tp_rate[MAX_THR_RATES];	/* highest-throughput rates, best first */
	u8 max_prob_rate;		/* rate with highest success probability */
	unsigned int total_packets;
	unsigned int sample_packets;
	int sample_deferred;

	/* position in the sampling table */
	unsigned int sample_row;
	unsigned int sample_column;

	int n_rates;			/* number of entries in r[] */
	struct minstrel_rate *r;	/* per-rate table (n_rates entries) */
	bool prev_sample;		/* previous frame was a sample attempt */

	/* sampling table */
	u8 *sample_table;
};
|
|
|
|
|
|
|
|
/*
 * Per-device (per-wiphy) minstrel configuration and state.
 */
struct minstrel_priv {
	struct ieee80211_hw *hw;
	bool has_mrr;		/* hardware supports multi-rate retry chains */
	/* packets-per-interval threshold above which the alternate
	 * rate-probing mode for static-fallback hardware kicks in
	 * (per the introducing commit: ~100 packets per stats interval) */
	u32 sample_switch;
	unsigned int cw_min;		/* minimum contention window */
	unsigned int cw_max;		/* maximum contention window */
	unsigned int max_retry;
	unsigned int segment_size;
	unsigned int update_interval;	/* stats update period */
	/* sampling probability — mrr variant used when has_mrr is set;
	 * presumably in percent, verify against rc_minstrel.c */
	unsigned int lookaround_rate;
	unsigned int lookaround_rate_mrr;

	u8 cck_rates[4];	/* rate indices of the four CCK (11b) rates */

#ifdef CONFIG_MAC80211_DEBUGFS
	/*
	 * enable fixed rate processing per RC
	 *   - write static index to debugfs:ieee80211/phyX/rc/fixed_rate_idx
	 *   - write -1 to enable RC processing again
	 *   - setting will be applied on next update
	 */
	u32 fixed_rate_idx;
#endif
};
|
|
|
|
|
2010-03-02 05:17:38 +08:00
|
|
|
/*
 * Buffer holding pre-rendered debugfs output; allocated with the
 * text appended in the trailing flexible array member.
 */
struct minstrel_debugfs_info {
	size_t len;	/* number of valid bytes in buf[] */
	char buf[];	/* flexible array member: the rendered text */
};
|
|
|
|
|
2014-01-21 06:29:34 +08:00
|
|
|
/* rate_control_ops instance implementing the legacy minstrel algorithm */
extern const struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);

/* Recalculate success probabilities and counters for a given rate using EWMA */
void minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs);
/* Estimated throughput for rate @mr given success probability @prob_ewma */
int minstrel_get_tp_avg(struct minstrel_rate *mr, int prob_ewma);

/* debugfs */
int minstrel_stats_open(struct inode *inode, struct file *file);
int minstrel_stats_csv_open(struct inode *inode, struct file *file);
|
2010-03-02 05:21:40 +08:00
|
|
|
|
2008-10-06 00:07:45 +08:00
|
|
|
#endif
|