2008-10-06 00:07:45 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* Based on minstrel.c:
|
|
|
|
* Copyright (C) 2005-2007 Derek Smithies <derek@indranet.co.nz>
|
|
|
|
* Sponsored by Indranet Technologies Ltd
|
|
|
|
*
|
|
|
|
* Based on sample.c:
|
|
|
|
* Copyright (c) 2005 John Bicket
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer,
|
|
|
|
* without modification.
|
|
|
|
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
|
|
|
|
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
|
|
|
|
* redistribution must be conditioned upon including a substantially
|
|
|
|
* similar Disclaimer requirement for further binary redistribution.
|
|
|
|
* 3. Neither the names of the above-listed copyright holders nor the names
|
|
|
|
* of any contributors may be used to endorse or promote products derived
|
|
|
|
* from this software without specific prior written permission.
|
|
|
|
*
|
|
|
|
* Alternatively, this software may be distributed under the terms of the
|
|
|
|
* GNU General Public License ("GPL") version 2 as published by the Free
|
|
|
|
* Software Foundation.
|
|
|
|
*
|
|
|
|
* NO WARRANTY
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
|
|
|
|
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
|
|
|
|
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
|
|
|
|
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
|
|
|
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
|
|
|
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
|
|
|
|
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
|
|
|
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
|
|
|
|
* THE POSSIBILITY OF SUCH DAMAGES.
|
|
|
|
*/
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/debugfs.h>
|
|
|
|
#include <linux/random.h>
|
|
|
|
#include <linux/ieee80211.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2008-10-06 00:07:45 +08:00
|
|
|
#include <net/mac80211.h>
|
|
|
|
#include "rate.h"
|
|
|
|
#include "rc80211_minstrel.h"
|
|
|
|
|
|
|
|
/* Number of pre-shuffled sampling sequences (columns) in the table. */
#define SAMPLE_COLUMNS	10

/* Look up one slot of the sampling table: one row per sampling step,
 * one column per pre-shuffled sequence.
 *
 * All arguments are parenthesized so the macro stays correct when a
 * caller passes a compound expression (e.g. "idx + 1") — the original
 * expansion would have bound "*" and "+" to the wrong subexpressions. */
#define SAMPLE_TBL(_mi, _idx, _col) \
	(_mi)->sample_table[((_idx) * SAMPLE_COLUMNS) + (_col)]
|
|
|
|
|
|
|
|
/* convert mac80211 rate index to local array index */
|
|
|
|
static inline int
|
|
|
|
rix_to_ndx(struct minstrel_sta_info *mi, int rix)
|
|
|
|
{
|
|
|
|
int i = rix;
|
|
|
|
for (i = rix; i >= 0; i--)
|
|
|
|
if (mi->r[i].rix == rix)
|
|
|
|
break;
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
|
|
|
|
{
|
|
|
|
u32 max_tp = 0, index_max_tp = 0, index_max_tp2 = 0;
|
|
|
|
u32 max_prob = 0, index_max_prob = 0;
|
|
|
|
u32 usecs;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
mi->stats_update = jiffies;
|
|
|
|
for (i = 0; i < mi->n_rates; i++) {
|
|
|
|
struct minstrel_rate *mr = &mi->r[i];
|
|
|
|
|
|
|
|
usecs = mr->perfect_tx_time;
|
|
|
|
if (!usecs)
|
|
|
|
usecs = 1000000;
|
|
|
|
|
|
|
|
if (mr->attempts) {
|
2013-03-05 06:30:02 +08:00
|
|
|
mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
|
2008-10-06 00:07:45 +08:00
|
|
|
mr->succ_hist += mr->success;
|
|
|
|
mr->att_hist += mr->attempts;
|
2013-03-05 06:30:01 +08:00
|
|
|
mr->probability = minstrel_ewma(mr->probability,
|
|
|
|
mr->cur_prob,
|
|
|
|
EWMA_LEVEL);
|
|
|
|
mr->cur_tp = mr->probability * (1000000 / usecs);
|
2008-10-06 00:07:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
mr->last_success = mr->success;
|
|
|
|
mr->last_attempts = mr->attempts;
|
|
|
|
mr->success = 0;
|
|
|
|
mr->attempts = 0;
|
|
|
|
|
|
|
|
/* Sample less often below the 10% chance of success.
|
|
|
|
* Sample less often above the 95% chance of success. */
|
2013-03-05 06:30:02 +08:00
|
|
|
if (mr->probability > MINSTREL_FRAC(95, 100) ||
|
|
|
|
mr->probability < MINSTREL_FRAC(10, 100)) {
|
2008-10-06 00:07:45 +08:00
|
|
|
mr->adjusted_retry_count = mr->retry_count >> 1;
|
|
|
|
if (mr->adjusted_retry_count > 2)
|
|
|
|
mr->adjusted_retry_count = 2;
|
2008-10-16 01:13:59 +08:00
|
|
|
mr->sample_limit = 4;
|
2008-10-06 00:07:45 +08:00
|
|
|
} else {
|
2008-10-16 01:13:59 +08:00
|
|
|
mr->sample_limit = -1;
|
2008-10-06 00:07:45 +08:00
|
|
|
mr->adjusted_retry_count = mr->retry_count;
|
|
|
|
}
|
|
|
|
if (!mr->adjusted_retry_count)
|
|
|
|
mr->adjusted_retry_count = 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < mi->n_rates; i++) {
|
|
|
|
struct minstrel_rate *mr = &mi->r[i];
|
|
|
|
if (max_tp < mr->cur_tp) {
|
|
|
|
index_max_tp = i;
|
|
|
|
max_tp = mr->cur_tp;
|
|
|
|
}
|
|
|
|
if (max_prob < mr->probability) {
|
|
|
|
index_max_prob = i;
|
|
|
|
max_prob = mr->probability;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
max_tp = 0;
|
|
|
|
for (i = 0; i < mi->n_rates; i++) {
|
|
|
|
struct minstrel_rate *mr = &mi->r[i];
|
|
|
|
|
|
|
|
if (i == index_max_tp)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (max_tp < mr->cur_tp) {
|
|
|
|
index_max_tp2 = i;
|
|
|
|
max_tp = mr->cur_tp;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mi->max_tp_rate = index_max_tp;
|
|
|
|
mi->max_tp_rate2 = index_max_tp2;
|
|
|
|
mi->max_prob_rate = index_max_prob;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
|
|
|
|
struct ieee80211_sta *sta, void *priv_sta,
|
|
|
|
struct sk_buff *skb)
|
|
|
|
{
|
2012-11-16 01:27:56 +08:00
|
|
|
struct minstrel_priv *mp = priv;
|
2008-10-06 00:07:45 +08:00
|
|
|
struct minstrel_sta_info *mi = priv_sta;
|
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
2008-10-21 18:40:02 +08:00
|
|
|
struct ieee80211_tx_rate *ar = info->status.rates;
|
|
|
|
int i, ndx;
|
|
|
|
int success;
|
2008-10-06 00:07:45 +08:00
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
success = !!(info->flags & IEEE80211_TX_STAT_ACK);
|
2008-10-06 00:07:45 +08:00
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
|
|
|
|
if (ar[i].idx < 0)
|
2008-10-06 00:07:45 +08:00
|
|
|
break;
|
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
ndx = rix_to_ndx(mi, ar[i].idx);
|
2009-07-03 13:25:08 +08:00
|
|
|
if (ndx < 0)
|
|
|
|
continue;
|
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
mi->r[ndx].attempts += ar[i].count;
|
2008-10-06 00:07:45 +08:00
|
|
|
|
2009-08-18 08:15:55 +08:00
|
|
|
if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0))
|
2008-10-06 00:07:45 +08:00
|
|
|
mi->r[ndx].success += success;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && (i >= 0))
|
|
|
|
mi->sample_count++;
|
|
|
|
|
|
|
|
if (mi->sample_deferred > 0)
|
|
|
|
mi->sample_deferred--;
|
2012-11-16 01:27:56 +08:00
|
|
|
|
|
|
|
if (time_after(jiffies, mi->stats_update +
|
|
|
|
(mp->update_interval * HZ) / 1000))
|
|
|
|
minstrel_update_stats(mp, mi);
|
2008-10-06 00:07:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static inline unsigned int
|
|
|
|
minstrel_get_retry_count(struct minstrel_rate *mr,
|
|
|
|
struct ieee80211_tx_info *info)
|
|
|
|
{
|
|
|
|
unsigned int retry = mr->adjusted_retry_count;
|
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
|
2008-10-06 00:07:45 +08:00
|
|
|
retry = max(2U, min(mr->retry_count_rtscts, retry));
|
2008-10-21 18:40:02 +08:00
|
|
|
else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
|
2008-10-06 00:07:45 +08:00
|
|
|
retry = max(2U, min(mr->retry_count_cts, retry));
|
|
|
|
return retry;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int
|
|
|
|
minstrel_get_next_sample(struct minstrel_sta_info *mi)
|
|
|
|
{
|
|
|
|
unsigned int sample_ndx;
|
|
|
|
sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column);
|
|
|
|
mi->sample_idx++;
|
mac80211: fix minstrel single-rate memory corruption
The minstrel rate controller periodically looks up rate indexes in
a sampling table. When accessing a specific row and column, minstrel
correctly does a bounds check which, on the surface, appears to handle
the case where mi->n_rates < 2. However, mi->sample_idx is actually
defined as an unsigned, so the right hand side is taken to be a huge
positive number when negative, and the check will always fail.
Consequently, the RC will overrun the array and cause random memory
corruption when communicating with a peer that has only a single rate.
The max value of mi->sample_idx is around 25 so casting to int should
have no ill effects.
Without the change, uptime is a few minutes under load with an AP
that has a single hard-coded rate, and both the AP and STA could
potentially crash. With the change, both lasted 12 hours with a
steady load.
Thanks to Ognjen Maric for providing the single-rate clue so I could
reproduce this.
This fixes http://bugzilla.kernel.org/show_bug.cgi?id=12490 on the
regression list (also http://bugzilla.kernel.org/show_bug.cgi?id=13000).
Cc: stable@kernel.org
Reported-by: Sergey S. Kostyliov <rathamahata@gmail.com>
Reported-by: Ognjen Maric <ognjen.maric@gmail.com>
Signed-off-by: Bob Copeland <me@bobcopeland.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
2009-06-05 20:21:50 +08:00
|
|
|
if ((int) mi->sample_idx > (mi->n_rates - 2)) {
|
2008-10-06 00:07:45 +08:00
|
|
|
mi->sample_idx = 0;
|
|
|
|
mi->sample_column++;
|
|
|
|
if (mi->sample_column >= SAMPLE_COLUMNS)
|
|
|
|
mi->sample_column = 0;
|
|
|
|
}
|
|
|
|
return sample_ndx;
|
|
|
|
}
|
|
|
|
|
2008-10-28 23:49:41 +08:00
|
|
|
static void
|
2008-10-21 18:40:02 +08:00
|
|
|
minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
|
|
|
|
void *priv_sta, struct ieee80211_tx_rate_control *txrc)
|
2008-10-06 00:07:45 +08:00
|
|
|
{
|
2008-10-21 18:40:02 +08:00
|
|
|
struct sk_buff *skb = txrc->skb;
|
2008-10-06 00:07:45 +08:00
|
|
|
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
|
|
|
struct minstrel_sta_info *mi = priv_sta;
|
|
|
|
struct minstrel_priv *mp = priv;
|
2008-10-21 18:40:02 +08:00
|
|
|
struct ieee80211_tx_rate *ar = info->control.rates;
|
2008-10-06 00:07:45 +08:00
|
|
|
unsigned int ndx, sample_ndx = 0;
|
|
|
|
bool mrr;
|
|
|
|
bool sample_slower = false;
|
|
|
|
bool sample = false;
|
|
|
|
int i, delta;
|
|
|
|
int mrr_ndx[3];
|
|
|
|
int sample_rate;
|
|
|
|
|
2009-07-17 01:05:41 +08:00
|
|
|
if (rate_control_send_low(sta, priv_sta, txrc))
|
2008-10-06 00:07:45 +08:00
|
|
|
return;
|
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot;
|
2008-10-06 00:07:45 +08:00
|
|
|
|
|
|
|
ndx = mi->max_tp_rate;
|
|
|
|
|
|
|
|
if (mrr)
|
|
|
|
sample_rate = mp->lookaround_rate_mrr;
|
|
|
|
else
|
|
|
|
sample_rate = mp->lookaround_rate;
|
|
|
|
|
|
|
|
mi->packet_count++;
|
|
|
|
delta = (mi->packet_count * sample_rate / 100) -
|
|
|
|
(mi->sample_count + mi->sample_deferred / 2);
|
|
|
|
|
|
|
|
/* delta > 0: sampling required */
|
2008-10-16 01:13:59 +08:00
|
|
|
if ((delta > 0) && (mrr || !mi->prev_sample)) {
|
|
|
|
struct minstrel_rate *msr;
|
2008-10-06 00:07:45 +08:00
|
|
|
if (mi->packet_count >= 10000) {
|
|
|
|
mi->sample_deferred = 0;
|
|
|
|
mi->sample_count = 0;
|
|
|
|
mi->packet_count = 0;
|
|
|
|
} else if (delta > mi->n_rates * 2) {
|
|
|
|
/* With multi-rate retry, not every planned sample
|
|
|
|
* attempt actually gets used, due to the way the retry
|
|
|
|
* chain is set up - [max_tp,sample,prob,lowest] for
|
|
|
|
* sample_rate < max_tp.
|
|
|
|
*
|
|
|
|
* If there's too much sampling backlog and the link
|
|
|
|
* starts getting worse, minstrel would start bursting
|
|
|
|
* out lots of sampling frames, which would result
|
|
|
|
* in a large throughput loss. */
|
|
|
|
mi->sample_count += (delta - mi->n_rates * 2);
|
|
|
|
}
|
|
|
|
|
|
|
|
sample_ndx = minstrel_get_next_sample(mi);
|
2008-10-16 01:13:59 +08:00
|
|
|
msr = &mi->r[sample_ndx];
|
2008-10-06 00:07:45 +08:00
|
|
|
sample = true;
|
2008-10-16 01:13:59 +08:00
|
|
|
sample_slower = mrr && (msr->perfect_tx_time >
|
2008-10-06 00:07:45 +08:00
|
|
|
mi->r[ndx].perfect_tx_time);
|
|
|
|
|
|
|
|
if (!sample_slower) {
|
2008-10-16 01:13:59 +08:00
|
|
|
if (msr->sample_limit != 0) {
|
|
|
|
ndx = sample_ndx;
|
|
|
|
mi->sample_count++;
|
|
|
|
if (msr->sample_limit > 0)
|
|
|
|
msr->sample_limit--;
|
|
|
|
} else {
|
|
|
|
sample = false;
|
|
|
|
}
|
2008-10-06 00:07:45 +08:00
|
|
|
} else {
|
|
|
|
/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
|
|
|
|
* packets that have the sampling rate deferred to the
|
|
|
|
* second MRR stage. Increase the sample counter only
|
|
|
|
* if the deferred sample rate was actually used.
|
|
|
|
* Use the sample_deferred counter to make sure that
|
|
|
|
* the sampling is not done in large bursts */
|
|
|
|
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
|
|
|
|
mi->sample_deferred++;
|
|
|
|
}
|
|
|
|
}
|
2008-10-16 01:13:59 +08:00
|
|
|
mi->prev_sample = sample;
|
|
|
|
|
|
|
|
/* If we're not using MRR and the sampling rate already
|
|
|
|
* has a probability of >95%, we shouldn't be attempting
|
|
|
|
* to use it, as this only wastes precious airtime */
|
2013-03-05 06:30:02 +08:00
|
|
|
if (!mrr && sample && (mi->r[ndx].probability > MINSTREL_FRAC(95, 100)))
|
2008-10-16 01:13:59 +08:00
|
|
|
ndx = mi->max_tp_rate;
|
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
ar[0].idx = mi->r[ndx].rix;
|
|
|
|
ar[0].count = minstrel_get_retry_count(&mi->r[ndx], info);
|
2008-10-06 00:07:45 +08:00
|
|
|
|
|
|
|
if (!mrr) {
|
2008-10-16 01:13:59 +08:00
|
|
|
if (!sample)
|
|
|
|
ar[0].count = mp->max_retry;
|
2008-10-21 18:40:02 +08:00
|
|
|
ar[1].idx = mi->lowest_rix;
|
|
|
|
ar[1].count = mp->max_retry;
|
2008-10-06 00:07:45 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* MRR setup */
|
|
|
|
if (sample) {
|
|
|
|
if (sample_slower)
|
|
|
|
mrr_ndx[0] = sample_ndx;
|
|
|
|
else
|
|
|
|
mrr_ndx[0] = mi->max_tp_rate;
|
|
|
|
} else {
|
|
|
|
mrr_ndx[0] = mi->max_tp_rate2;
|
|
|
|
}
|
|
|
|
mrr_ndx[1] = mi->max_prob_rate;
|
|
|
|
mrr_ndx[2] = 0;
|
2008-10-21 18:40:02 +08:00
|
|
|
for (i = 1; i < 4; i++) {
|
|
|
|
ar[i].idx = mi->r[mrr_ndx[i - 1]].rix;
|
|
|
|
ar[i].count = mi->r[mrr_ndx[i - 1]].adjusted_retry_count;
|
2008-10-06 00:07:45 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void
|
2012-04-11 14:47:56 +08:00
|
|
|
calc_rate_durations(enum ieee80211_band band,
|
|
|
|
struct minstrel_rate *d,
|
2011-11-10 22:13:11 +08:00
|
|
|
struct ieee80211_rate *rate)
|
2008-10-06 00:07:45 +08:00
|
|
|
{
|
|
|
|
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
|
|
|
|
|
2012-04-11 14:47:56 +08:00
|
|
|
d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
|
2008-10-06 00:07:45 +08:00
|
|
|
rate->bitrate, erp, 1);
|
2012-04-11 14:47:56 +08:00
|
|
|
d->ack_time = ieee80211_frame_duration(band, 10,
|
2008-10-06 00:07:45 +08:00
|
|
|
rate->bitrate, erp, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
init_sample_table(struct minstrel_sta_info *mi)
|
|
|
|
{
|
|
|
|
unsigned int i, col, new_idx;
|
|
|
|
unsigned int n_srates = mi->n_rates - 1;
|
|
|
|
u8 rnd[8];
|
|
|
|
|
|
|
|
mi->sample_column = 0;
|
|
|
|
mi->sample_idx = 0;
|
|
|
|
memset(mi->sample_table, 0, SAMPLE_COLUMNS * mi->n_rates);
|
|
|
|
|
|
|
|
for (col = 0; col < SAMPLE_COLUMNS; col++) {
|
|
|
|
for (i = 0; i < n_srates; i++) {
|
|
|
|
get_random_bytes(rnd, sizeof(rnd));
|
|
|
|
new_idx = (i + rnd[i & 7]) % n_srates;
|
|
|
|
|
|
|
|
while (SAMPLE_TBL(mi, new_idx, col) != 0)
|
|
|
|
new_idx = (new_idx + 1) % n_srates;
|
|
|
|
|
|
|
|
/* Don't sample the slowest rate (i.e. slowest base
|
|
|
|
* rate). We must presume that the slowest rate works
|
|
|
|
* fine, or else other management frames will also be
|
|
|
|
* failing and the link will break */
|
|
|
|
SAMPLE_TBL(mi, new_idx, col) = i + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
|
|
|
|
struct ieee80211_sta *sta, void *priv_sta)
|
|
|
|
{
|
|
|
|
struct minstrel_sta_info *mi = priv_sta;
|
|
|
|
struct minstrel_priv *mp = priv;
|
2008-12-22 22:35:31 +08:00
|
|
|
struct ieee80211_rate *ctl_rate;
|
2008-10-06 00:07:45 +08:00
|
|
|
unsigned int i, n = 0;
|
|
|
|
unsigned int t_slot = 9; /* FIXME: get real slot time */
|
|
|
|
|
|
|
|
mi->lowest_rix = rate_lowest_index(sband, sta);
|
2008-12-22 22:35:31 +08:00
|
|
|
ctl_rate = &sband->bitrates[mi->lowest_rix];
|
2012-04-11 14:47:56 +08:00
|
|
|
mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
|
|
|
|
ctl_rate->bitrate,
|
2008-12-22 22:35:31 +08:00
|
|
|
!!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
|
2008-10-06 00:07:45 +08:00
|
|
|
|
|
|
|
for (i = 0; i < sband->n_bitrates; i++) {
|
|
|
|
struct minstrel_rate *mr = &mi->r[n];
|
|
|
|
unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
|
|
|
|
unsigned int tx_time_single;
|
|
|
|
unsigned int cw = mp->cw_min;
|
|
|
|
|
|
|
|
if (!rate_supported(sta, sband->band, i))
|
|
|
|
continue;
|
|
|
|
n++;
|
|
|
|
memset(mr, 0, sizeof(*mr));
|
|
|
|
|
|
|
|
mr->rix = i;
|
|
|
|
mr->bitrate = sband->bitrates[i].bitrate / 5;
|
2012-04-11 14:47:56 +08:00
|
|
|
calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
|
2008-10-06 00:07:45 +08:00
|
|
|
|
|
|
|
/* calculate maximum number of retransmissions before
|
|
|
|
* fallback (based on maximum segment size) */
|
2008-10-16 01:13:59 +08:00
|
|
|
mr->sample_limit = -1;
|
2008-10-06 00:07:45 +08:00
|
|
|
mr->retry_count = 1;
|
|
|
|
mr->retry_count_cts = 1;
|
|
|
|
mr->retry_count_rtscts = 1;
|
|
|
|
tx_time = mr->perfect_tx_time + mi->sp_ack_dur;
|
|
|
|
do {
|
|
|
|
/* add one retransmission */
|
|
|
|
tx_time_single = mr->ack_time + mr->perfect_tx_time;
|
|
|
|
|
|
|
|
/* contention window */
|
2011-05-11 10:00:45 +08:00
|
|
|
tx_time_single += (t_slot * cw) >> 1;
|
|
|
|
cw = min((cw << 1) | 1, mp->cw_max);
|
2008-10-06 00:07:45 +08:00
|
|
|
|
|
|
|
tx_time += tx_time_single;
|
|
|
|
tx_time_cts += tx_time_single + mi->sp_ack_dur;
|
|
|
|
tx_time_rtscts += tx_time_single + 2 * mi->sp_ack_dur;
|
|
|
|
if ((tx_time_cts < mp->segment_size) &&
|
|
|
|
(mr->retry_count_cts < mp->max_retry))
|
|
|
|
mr->retry_count_cts++;
|
|
|
|
if ((tx_time_rtscts < mp->segment_size) &&
|
|
|
|
(mr->retry_count_rtscts < mp->max_retry))
|
|
|
|
mr->retry_count_rtscts++;
|
|
|
|
} while ((tx_time < mp->segment_size) &&
|
|
|
|
(++mr->retry_count < mp->max_retry));
|
|
|
|
mr->adjusted_retry_count = mr->retry_count;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = n; i < sband->n_bitrates; i++) {
|
|
|
|
struct minstrel_rate *mr = &mi->r[i];
|
|
|
|
mr->rix = -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
mi->n_rates = n;
|
|
|
|
mi->stats_update = jiffies;
|
|
|
|
|
|
|
|
init_sample_table(mi);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *
|
|
|
|
minstrel_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
|
|
|
|
{
|
|
|
|
struct ieee80211_supported_band *sband;
|
|
|
|
struct minstrel_sta_info *mi;
|
|
|
|
struct minstrel_priv *mp = priv;
|
|
|
|
struct ieee80211_hw *hw = mp->hw;
|
|
|
|
int max_rates = 0;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
mi = kzalloc(sizeof(struct minstrel_sta_info), gfp);
|
|
|
|
if (!mi)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
|
2009-05-05 00:04:55 +08:00
|
|
|
sband = hw->wiphy->bands[i];
|
2009-05-06 03:18:26 +08:00
|
|
|
if (sband && sband->n_bitrates > max_rates)
|
2008-10-06 00:07:45 +08:00
|
|
|
max_rates = sband->n_bitrates;
|
|
|
|
}
|
|
|
|
|
|
|
|
mi->r = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
|
|
|
|
if (!mi->r)
|
|
|
|
goto error;
|
|
|
|
|
|
|
|
mi->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
|
|
|
|
if (!mi->sample_table)
|
|
|
|
goto error1;
|
|
|
|
|
|
|
|
mi->stats_update = jiffies;
|
|
|
|
return mi;
|
|
|
|
|
|
|
|
error1:
|
|
|
|
kfree(mi->r);
|
|
|
|
error:
|
|
|
|
kfree(mi);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
minstrel_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
|
|
|
|
{
|
|
|
|
struct minstrel_sta_info *mi = priv_sta;
|
|
|
|
|
|
|
|
kfree(mi->sample_table);
|
|
|
|
kfree(mi->r);
|
|
|
|
kfree(mi);
|
|
|
|
}
|
|
|
|
|
2013-02-13 17:51:08 +08:00
|
|
|
static void
|
|
|
|
minstrel_init_cck_rates(struct minstrel_priv *mp)
|
|
|
|
{
|
|
|
|
static const int bitrates[4] = { 10, 20, 55, 110 };
|
|
|
|
struct ieee80211_supported_band *sband;
|
|
|
|
int i, j;
|
|
|
|
|
|
|
|
sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
|
|
|
|
if (!sband)
|
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0, j = 0; i < sband->n_bitrates; i++) {
|
|
|
|
struct ieee80211_rate *rate = &sband->bitrates[i];
|
|
|
|
|
|
|
|
if (rate->flags & IEEE80211_RATE_ERP_G)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
|
|
|
|
if (rate->bitrate != bitrates[j])
|
|
|
|
continue;
|
|
|
|
|
|
|
|
mp->cck_rates[j] = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-10-06 00:07:45 +08:00
|
|
|
static void *
|
|
|
|
minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
|
|
|
|
{
|
|
|
|
struct minstrel_priv *mp;
|
|
|
|
|
|
|
|
mp = kzalloc(sizeof(struct minstrel_priv), GFP_ATOMIC);
|
|
|
|
if (!mp)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/* contention window settings
|
|
|
|
* Just an approximation. Using the per-queue values would complicate
|
|
|
|
* the calculations and is probably unnecessary */
|
|
|
|
mp->cw_min = 15;
|
|
|
|
mp->cw_max = 1023;
|
|
|
|
|
|
|
|
/* number of packets (in %) to use for sampling other rates
|
|
|
|
* sample less often for non-mrr packets, because the overhead
|
|
|
|
* is much higher than with mrr */
|
|
|
|
mp->lookaround_rate = 5;
|
|
|
|
mp->lookaround_rate_mrr = 10;
|
|
|
|
|
|
|
|
/* maximum time that the hw is allowed to stay in one MRR segment */
|
|
|
|
mp->segment_size = 6000;
|
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
if (hw->max_rate_tries > 0)
|
|
|
|
mp->max_retry = hw->max_rate_tries;
|
2008-10-06 00:07:45 +08:00
|
|
|
else
|
|
|
|
/* safe default, does not necessarily have to match hw properties */
|
|
|
|
mp->max_retry = 7;
|
|
|
|
|
2008-10-21 18:40:02 +08:00
|
|
|
if (hw->max_rates >= 4)
|
2008-10-06 00:07:45 +08:00
|
|
|
mp->has_mrr = true;
|
|
|
|
|
|
|
|
mp->hw = hw;
|
|
|
|
mp->update_interval = 100;
|
|
|
|
|
2011-05-21 02:29:17 +08:00
|
|
|
#ifdef CONFIG_MAC80211_DEBUGFS
|
|
|
|
mp->fixed_rate_idx = (u32) -1;
|
|
|
|
mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
|
|
|
|
S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
|
|
|
|
#endif
|
|
|
|
|
2013-02-13 17:51:08 +08:00
|
|
|
minstrel_init_cck_rates(mp);
|
|
|
|
|
2008-10-06 00:07:45 +08:00
|
|
|
return mp;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Tear down the algorithm-wide private data. */
static void
minstrel_free(void *priv)
{
#ifdef CONFIG_MAC80211_DEBUGFS
	struct minstrel_priv *mp = priv;

	debugfs_remove(mp->dbg_fixed_rate);
#endif
	kfree(priv);
}
|
|
|
|
|
2010-03-02 05:21:40 +08:00
|
|
|
struct rate_control_ops mac80211_minstrel = {
|
2008-10-06 00:07:45 +08:00
|
|
|
.name = "minstrel",
|
|
|
|
.tx_status = minstrel_tx_status,
|
|
|
|
.get_rate = minstrel_get_rate,
|
|
|
|
.rate_init = minstrel_rate_init,
|
|
|
|
.alloc = minstrel_alloc,
|
|
|
|
.free = minstrel_free,
|
|
|
|
.alloc_sta = minstrel_alloc_sta,
|
|
|
|
.free_sta = minstrel_free_sta,
|
|
|
|
#ifdef CONFIG_MAC80211_DEBUGFS
|
|
|
|
.add_sta_debugfs = minstrel_add_sta_debugfs,
|
|
|
|
.remove_sta_debugfs = minstrel_remove_sta_debugfs,
|
|
|
|
#endif
|
|
|
|
};
|
|
|
|
|
|
|
|
int __init
|
|
|
|
rc80211_minstrel_init(void)
|
|
|
|
{
|
|
|
|
return ieee80211_rate_control_register(&mac80211_minstrel);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
rc80211_minstrel_exit(void)
|
|
|
|
{
|
|
|
|
ieee80211_rate_control_unregister(&mac80211_minstrel);
|
|
|
|
}
|
|
|
|
|