mwifiex: restructure rx_reorder_tbl_lock usage
Driver must ensure that whenever it holds a pointer to the list entry
mwifiex_rx_reorder_tbl, it protects that entry with rx_reorder_tbl_lock.
At present there are many places where the driver does not ensure this.
To cover all cases, the spinlock calls inside the functions below are
moved out, and their callers now take the lock:

mwifiex_11n_dispatch_pkt_until_start_win()
mwifiex_11n_scan_and_dispatch()
mwifiex_del_rx_reorder_entry()
mwifiex_11n_get_rx_reorder_tbl()
mwifiex_11n_find_last_seq_num()

Signed-off-by: Ganapathi Bhat <gbhat@marvell.com>
Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
commit 5188d5453b
parent 5631909364
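As a reading aid for the diff below (not part of the patch), here is a minimal user-space sketch of the locking rule the change establishes: the lookup helper no longer takes the lock itself, so every caller must acquire rx_reorder_tbl_lock before the lookup and keep holding it for as long as it dereferences the returned entry. The names reorder_tbl, get_tbl() and use_tbl() are invented for this sketch, and a pthread mutex stands in for the kernel spinlock.

/* Sketch only: user-space analogue of the "caller holds the lock" contract. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct reorder_tbl {                 /* stands in for struct mwifiex_rx_reorder_tbl */
        struct reorder_tbl *next;
        int tid;
        unsigned char ta[6];
};

static struct reorder_tbl *tbl_list;                          /* the shared list */
static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;  /* rx_reorder_tbl_lock analogue */

/* Lookup helper: does NOT lock; the caller must hold tbl_lock. */
static struct reorder_tbl *get_tbl(int tid, const unsigned char *ta)
{
        struct reorder_tbl *t;

        for (t = tbl_list; t; t = t->next)
                if (t->tid == tid && !memcmp(t->ta, ta, 6))
                        return t;
        return NULL;
}

/* Caller: lock, look up, use the entry, then unlock; never touch it afterwards. */
static void use_tbl(int tid, const unsigned char *ta)
{
        struct reorder_tbl *t;

        pthread_mutex_lock(&tbl_lock);
        t = get_tbl(tid, ta);
        if (t)
                printf("found entry for tid %d\n", t->tid);
        pthread_mutex_unlock(&tbl_lock);
        /* 't' may be freed by another thread from here on; do not reuse it. */
}

int main(void)
{
        unsigned char ta[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
        struct reorder_tbl *t = calloc(1, sizeof(*t));

        if (!t)
                return 1;
        t->tid = 5;
        memcpy(t->ta, ta, 6);
        tbl_list = t;
        use_tbl(5, ta);
        free(t);
        return 0;
}

This is the same contract spelled out by the "The caller must hold rx_reorder_tbl_lock spinlock" comments added throughout the diff.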
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -696,10 +696,11 @@ void mwifiex_11n_delba(struct mwifiex_private *priv, int tid)
                                     "Send delba to tid=%d, %pM\n",
                                     tid, rx_reor_tbl_ptr->ta);
                         mwifiex_send_delba(priv, tid, rx_reor_tbl_ptr->ta, 0);
-                        goto exit;
+                        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                               flags);
+                        return;
                 }
         }
-exit:
         spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -103,6 +103,8 @@ static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
  * There could be holes in the buffer, which are skipped by the function.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
@@ -111,25 +113,21 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
 {
         int pkt_to_send, i;
         void *rx_tmp_ptr;
-        unsigned long flags;
 
         pkt_to_send = (start_win > tbl->start_win) ?
                         min((start_win - tbl->start_win), tbl->win_size) :
                         tbl->win_size;
 
         for (i = 0; i < pkt_to_send; ++i) {
-                spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                 rx_tmp_ptr = NULL;
                 if (tbl->rx_reorder_ptr[i]) {
                         rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                         tbl->rx_reorder_ptr[i] = NULL;
                 }
-                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                 if (rx_tmp_ptr)
                         mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
         }
 
-        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
         /*
          * We don't have a circular buffer, hence use rotation to simulate
          * circular buffer
@@ -140,7 +138,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
         }
 
         tbl->start_win = start_win;
-        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -150,6 +147,8 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
  * The start window is adjusted automatically when a hole is located.
  * Since the buffer is linear, the function uses rotation to simulate
  * circular buffer.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
@@ -157,22 +156,15 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
 {
         int i, j, xchg;
         void *rx_tmp_ptr;
-        unsigned long flags;
 
         for (i = 0; i < tbl->win_size; ++i) {
-                spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-                if (!tbl->rx_reorder_ptr[i]) {
-                        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                               flags);
+                if (!tbl->rx_reorder_ptr[i])
                         break;
-                }
                 rx_tmp_ptr = tbl->rx_reorder_ptr[i];
                 tbl->rx_reorder_ptr[i] = NULL;
-                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                 mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
         }
 
-        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
         /*
          * We don't have a circular buffer, hence use rotation to simulate
          * circular buffer
@@ -185,7 +177,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
                 }
         }
         tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
-        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -193,6 +184,8 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
  *
  * The function stops the associated timer and dispatches all the
  * pending packets in the Rx reorder table before deletion.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static void
 mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
@@ -218,11 +211,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 
         del_timer_sync(&tbl->timer_context.timer);
         tbl->timer_context.timer_is_set = false;
-
-        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
         list_del(&tbl->list);
-        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
-
         kfree(tbl->rx_reorder_ptr);
         kfree(tbl);
 
@@ -235,22 +224,17 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
 /*
  * This function returns the pointer to an entry in Rx reordering
  * table which matches the given TA/TID pair.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 struct mwifiex_rx_reorder_tbl *
 mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
 {
         struct mwifiex_rx_reorder_tbl *tbl;
-        unsigned long flags;
 
-        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-        list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
-                if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
-                        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                               flags);
+        list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
+                if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid)
                         return tbl;
-                }
-        }
-        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
         return NULL;
 }
@@ -267,14 +251,9 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
                 return;
 
         spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-        list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
-                if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
-                        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                               flags);
+        list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list)
+                if (!memcmp(tbl->ta, ta, ETH_ALEN))
                         mwifiex_del_rx_reorder_entry(priv, tbl);
-                        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-                }
-        }
         spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
         return;
@@ -283,24 +262,18 @@ void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
 /*
  * This function finds the last sequence number used in the packets
  * buffered in Rx reordering table.
+ *
+ * The caller must hold rx_reorder_tbl_lock spinlock.
  */
 static int
 mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
 {
         struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
-        struct mwifiex_private *priv = ctx->priv;
-        unsigned long flags;
         int i;
 
-        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-        for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
-                if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
-                        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
-                                               flags);
+        for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i)
+                if (rx_reorder_tbl_ptr->rx_reorder_ptr[i])
                         return i;
-                }
-        }
-        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
         return -1;
 }
@@ -318,17 +291,22 @@ mwifiex_flush_data(struct timer_list *t)
         struct reorder_tmr_cnxt *ctx =
                 from_timer(ctx, t, timer);
         int start_win, seq_num;
+        unsigned long flags;
 
         ctx->timer_is_set = false;
+        spin_lock_irqsave(&ctx->priv->rx_reorder_tbl_lock, flags);
         seq_num = mwifiex_11n_find_last_seq_num(ctx);
 
-        if (seq_num < 0)
+        if (seq_num < 0) {
+                spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
                 return;
+        }
 
         mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
         start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
         mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
                                                  start_win);
+        spin_unlock_irqrestore(&ctx->priv->rx_reorder_tbl_lock, flags);
 }
 
 /*
@@ -355,11 +333,14 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
          * If we get a TID, ta pair which is already present dispatch all the
          * the packets and move the window size until the ssn
          */
+        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
         tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
         if (tbl) {
                 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
+                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                 return;
         }
+        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
         /* if !tbl then create one */
         new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
         if (!new_node)
@@ -570,16 +551,20 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
         int prev_start_win, start_win, end_win, win_size;
         u16 pkt_index;
         bool init_window_shift = false;
+        unsigned long flags;
         int ret = 0;
 
+        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
         tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
         if (!tbl) {
+                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                 if (pkt_type != PKT_TYPE_BAR)
                         mwifiex_11n_dispatch_pkt(priv, payload);
                 return ret;
         }
 
         if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
+                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                 mwifiex_11n_dispatch_pkt(priv, payload);
                 return ret;
         }
@@ -666,6 +651,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
         if (!tbl->timer_context.timer_is_set ||
             prev_start_win != tbl->start_win)
                 mwifiex_11n_rxreorder_timer_restart(tbl);
+
+        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
         return ret;
 }
 
@@ -694,14 +681,18 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
                   peer_mac, tid, initiator);
 
         if (cleanup_rx_reorder_tbl) {
+                spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                                      peer_mac);
                 if (!tbl) {
+                        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                               flags);
                         mwifiex_dbg(priv->adapter, EVENT,
                                     "event: TID, TA not found in table\n");
                         return;
                 }
                 mwifiex_del_rx_reorder_entry(priv, tbl);
+                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
         } else {
                 ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
                 if (!ptx_tbl) {
@@ -735,6 +726,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
         int tid, win_size;
         struct mwifiex_rx_reorder_tbl *tbl;
         uint16_t block_ack_param_set;
+        unsigned long flags;
 
         block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
 
@@ -748,17 +740,20 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
                 mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
                             add_ba_rsp->peer_mac_addr, tid);
 
+                spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                                      add_ba_rsp->peer_mac_addr);
                 if (tbl)
                         mwifiex_del_rx_reorder_entry(priv, tbl);
 
+                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                 return 0;
         }
 
         win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
                         >> BLOCKACKPARAM_WINSIZE_POS;
 
+        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
         tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
                                              add_ba_rsp->peer_mac_addr);
         if (tbl) {
@@ -769,6 +764,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
                 else
                         tbl->amsdu = false;
         }
+        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
         mwifiex_dbg(priv->adapter, CMD,
                     "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
@@ -808,11 +804,8 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
 
         spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
         list_for_each_entry_safe(del_tbl_ptr, tmp_node,
-                                 &priv->rx_reorder_tbl_ptr, list) {
-                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
+                                 &priv->rx_reorder_tbl_ptr, list)
                 mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
-                spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
-        }
         INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
         spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
@@ -936,6 +929,7 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
         int tlv_buf_left = len;
         int ret;
         u8 *tmp;
+        unsigned long flags;
 
         mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
                          event_buf, len);
@@ -955,14 +949,18 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
                             tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
                             tlv_bitmap_len);
 
+                spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
                 rx_reor_tbl_ptr =
                         mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
                                                        tlv_rxba->mac);
                 if (!rx_reor_tbl_ptr) {
+                        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
+                                               flags);
                         mwifiex_dbg(priv->adapter, ERROR,
                                     "Can not find rx_reorder_tbl!");
                         return;
                 }
+                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
                 for (i = 0; i < tlv_bitmap_len; i++) {
                         for (j = 0 ; j < 8; j++) {
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -421,12 +421,15 @@ int mwifiex_process_uap_rx_packet(struct mwifiex_private *priv,
                 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
         }
 
+        spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
         if (!priv->ap_11n_enabled ||
             (!mwifiex_11n_get_rx_reorder_tbl(priv, uap_rx_pd->priority, ta) &&
             (le16_to_cpu(uap_rx_pd->rx_pkt_type) != PKT_TYPE_AMSDU))) {
                 ret = mwifiex_handle_uap_rx_forward(priv, skb);
+                spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
                 return ret;
         }
+        spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
 
         /* Reorder and send to kernel */
         pkt_type = (u8)le16_to_cpu(uap_rx_pd->rx_pkt_type);