mirror of https://gitee.com/openkylin/linux.git

commit 1698aca040: Merge branch 'for-linville' of git://github.com/kvalo/ath
drivers/net/wireless/ath/ath10k/ce.c

@@ -266,12 +266,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
  * ath10k_ce_sendlist_send.
  * The caller takes responsibility for any needed locking.
  */
-static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
-                                 void *per_transfer_context,
-                                 u32 buffer,
-                                 unsigned int nbytes,
-                                 unsigned int transfer_id,
-                                 unsigned int flags)
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+                          void *per_transfer_context,
+                          u32 buffer,
+                          unsigned int nbytes,
+                          unsigned int transfer_id,
+                          unsigned int flags)
 {
     struct ath10k *ar = ce_state->ar;
     struct ath10k_ce_ring *src_ring = ce_state->src_ring;

@@ -1067,9 +1067,9 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
      *
      * For the lack of a better place do the check here.
      */
-    BUILD_BUG_ON(TARGET_NUM_MSDU_DESC >
+    BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
                  (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
-    BUILD_BUG_ON(TARGET_10X_NUM_MSDU_DESC >
+    BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
                  (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
 
     ret = ath10k_pci_wake(ar);
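The doubled bound appears to reflect the new scatter-gather tx scheme elsewhere in this merge: each MSDU is posted as two items (the HTT descriptor and the frame itself), so the compile-time check must reserve twice the descriptor count. A stand-alone sketch of the same BUILD_BUG_ON() idiom, with hypothetical constants rather than the driver's real ones:

#include <linux/bug.h>

/* Illustrative analogue of the check above. BUILD_BUG_ON() turns a
 * violated capacity assumption into a compile error instead of a
 * runtime ring overflow. Constants are made-up stand-ins. */
#define EXAMPLE_NUM_MSDU_DESC          1424
#define EXAMPLE_SRC_RING_NENTRIES      4096

static inline void example_check_ring_capacity(void)
{
    /* two source-ring entries per MSDU; one ring slot stays unused */
    BUILD_BUG_ON(2 * EXAMPLE_NUM_MSDU_DESC >
                 (EXAMPLE_SRC_RING_NENTRIES - 1));
}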
drivers/net/wireless/ath/ath10k/ce.h

@@ -23,7 +23,7 @@
 
 /* Maximum number of Copy Engine's supported */
 #define CE_COUNT_MAX 8
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 2048
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
 
 /* Descriptor rings must be aligned to this boundary */
 #define CE_DESC_RING_ALIGN 8

@@ -152,6 +152,13 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
                    unsigned int transfer_id,
                    unsigned int flags);
 
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+                          void *per_transfer_context,
+                          u32 buffer,
+                          unsigned int nbytes,
+                          unsigned int transfer_id,
+                          unsigned int flags);
+
 void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
                                 void (*send_cb)(struct ath10k_ce_pipe *),
                                 int disable_interrupts);
drivers/net/wireless/ath/ath10k/core.h

@@ -62,16 +62,13 @@ struct ath10k;
 
 struct ath10k_skb_cb {
     dma_addr_t paddr;
-    bool is_mapped;
-    bool is_aborted;
     u8 vdev_id;
 
     struct {
         u8 tid;
         bool is_offchan;
-
-        u8 frag_len;
-        u8 pad_len;
+        struct ath10k_htt_txbuf *txbuf;
+        u32 txbuf_paddr;
     } __packed htt;
 
     struct {

@@ -87,32 +84,6 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
     return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
 }
 
-static inline int ath10k_skb_map(struct device *dev, struct sk_buff *skb)
-{
-    if (ATH10K_SKB_CB(skb)->is_mapped)
-        return -EINVAL;
-
-    ATH10K_SKB_CB(skb)->paddr = dma_map_single(dev, skb->data, skb->len,
-                                               DMA_TO_DEVICE);
-
-    if (unlikely(dma_mapping_error(dev, ATH10K_SKB_CB(skb)->paddr)))
-        return -EIO;
-
-    ATH10K_SKB_CB(skb)->is_mapped = true;
-    return 0;
-}
-
-static inline int ath10k_skb_unmap(struct device *dev, struct sk_buff *skb)
-{
-    if (!ATH10K_SKB_CB(skb)->is_mapped)
-        return -EINVAL;
-
-    dma_unmap_single(dev, ATH10K_SKB_CB(skb)->paddr, skb->len,
-                     DMA_TO_DEVICE);
-    ATH10K_SKB_CB(skb)->is_mapped = false;
-    return 0;
-}
-
 static inline u32 host_interest_item_address(u32 item_offset)
 {
     return QCA988X_HOST_INTEREST_ADDRESS + item_offset;

@@ -288,6 +259,7 @@ struct ath10k_vif {
 
     u8 fixed_rate;
     u8 fixed_nss;
+    u8 force_sgi;
 };
 
 struct ath10k_vif_iter {
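With is_mapped gone, callers now open-code the mapping instead of going through the removed wrappers; the rest of this merge shows that conversion site by site. A minimal sketch of the resulting pattern, assuming the caller owns the dma_addr_t itself:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Minimal sketch of the open-coded pattern that replaces the removed
 * helpers; the caller tracks the DMA address instead of an is_mapped
 * flag in the skb control block. */
static int example_map_tx_skb(struct device *dev, struct sk_buff *skb,
                              dma_addr_t *paddr)
{
    *paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, *paddr))
        return -EIO;

    return 0;
}

static void example_unmap_tx_skb(struct device *dev, struct sk_buff *skb,
                                 dma_addr_t paddr)
{
    dma_unmap_single(dev, paddr, skb->len, DMA_TO_DEVICE);
}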
drivers/net/wireless/ath/ath10k/hif.h

@@ -21,6 +21,14 @@
 #include <linux/kernel.h>
 #include "core.h"
 
+struct ath10k_hif_sg_item {
+    u16 transfer_id;
+    void *transfer_context; /* NULL = tx completion callback not called */
+    void *vaddr; /* for debugging mostly */
+    u32 paddr;
+    u16 len;
+};
+
 struct ath10k_hif_cb {
     int (*tx_completion)(struct ath10k *ar,
                          struct sk_buff *wbuf,

@@ -31,11 +39,9 @@ struct ath10k_hif_cb {
 };
 
 struct ath10k_hif_ops {
-    /* Send the head of a buffer to HIF for transmission to the target. */
-    int (*send_head)(struct ath10k *ar, u8 pipe_id,
-                     unsigned int transfer_id,
-                     unsigned int nbytes,
-                     struct sk_buff *buf);
+    /* send a scatter-gather list to the target */
+    int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+                 struct ath10k_hif_sg_item *items, int n_items);
 
     /*
      * API to handle HIF-specific BMI message exchanges, this API is

@@ -86,12 +92,11 @@ struct ath10k_hif_ops {
 };
 
 
-static inline int ath10k_hif_send_head(struct ath10k *ar, u8 pipe_id,
-                                       unsigned int transfer_id,
-                                       unsigned int nbytes,
-                                       struct sk_buff *buf)
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+                                   struct ath10k_hif_sg_item *items,
+                                   int n_items)
 {
-    return ar->hif.ops->send_head(ar, pipe_id, transfer_id, nbytes, buf);
+    return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
 }
 
 static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
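For illustration, a hypothetical HIF backend might consume the new op roughly as below, assuming the ath10k_hif_sg_item definition from the hunk above; example_queue_buffer() is a made-up stand-in for the bus-specific ring write, not a real ath10k function:

/* Made-up bus write; a real backend would post to its DMA ring here. */
static int example_queue_buffer(struct ath10k *ar, u8 pipe_id,
                                u32 paddr, u16 len,
                                u16 transfer_id, void *context)
{
    return 0;
}

static int example_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                             struct ath10k_hif_sg_item *items, int n_items)
{
    int i, ret;

    for (i = 0; i < n_items; i++) {
        /* a NULL transfer_context means no tx-completion callback
         * is invoked for that item, which the HTT tx path relies on */
        ret = example_queue_buffer(ar, pipe_id,
                                   items[i].paddr, items[i].len,
                                   items[i].transfer_id,
                                   items[i].transfer_context);
        if (ret)
            return ret;
    }

    return 0;
}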
drivers/net/wireless/ath/ath10k/htc.c

@@ -63,7 +63,9 @@ static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
 static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
                                              struct sk_buff *skb)
 {
-    ath10k_skb_unmap(htc->ar->dev, skb);
+    struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+    dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
     skb_pull(skb, sizeof(struct ath10k_htc_hdr));
 }
 

@@ -122,6 +124,9 @@ int ath10k_htc_send(struct ath10k_htc *htc,
                     struct sk_buff *skb)
 {
     struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+    struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+    struct ath10k_hif_sg_item sg_item;
+    struct device *dev = htc->ar->dev;
     int credits = 0;
     int ret;
 

@@ -157,19 +162,25 @@ int ath10k_htc_send(struct ath10k_htc *htc,
 
     ath10k_htc_prepare_tx_skb(ep, skb);
 
-    ret = ath10k_skb_map(htc->ar->dev, skb);
+    skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+    ret = dma_mapping_error(dev, skb_cb->paddr);
     if (ret)
         goto err_credits;
 
-    ret = ath10k_hif_send_head(htc->ar, ep->ul_pipe_id, ep->eid,
-                               skb->len, skb);
+    sg_item.transfer_id = ep->eid;
+    sg_item.transfer_context = skb;
+    sg_item.vaddr = skb->data;
+    sg_item.paddr = skb_cb->paddr;
+    sg_item.len = skb->len;
+
+    ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
     if (ret)
         goto err_unmap;
 
     return 0;
 
 err_unmap:
-    ath10k_skb_unmap(htc->ar->dev, skb);
+    dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
 err_credits:
     if (ep->tx_credit_flow_enabled) {
         spin_lock_bh(&htc->tx_lock);

@@ -191,10 +202,8 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
     struct ath10k_htc *htc = &ar->htc;
     struct ath10k_htc_ep *ep = &htc->endpoint[eid];
 
-    if (!skb) {
-        ath10k_warn("invalid sk_buff completion - NULL pointer. firmware crashed?\n");
+    if (WARN_ON_ONCE(!skb))
         return 0;
-    }
 
     ath10k_htc_notify_tx_completion(ep, skb);
     /* the skb now belongs to the completion handler */
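The WARN_ON_ONCE() conversion in the last hunk keeps the diagnostic but emits a single backtrace instead of one warning per bad completion. The guard pattern in isolation, as a sketch:

#include <linux/bug.h>
#include <linux/skbuff.h>

/* Warn with one backtrace on the impossible case, then bail out. */
static int example_tx_completion(struct sk_buff *skb)
{
    if (WARN_ON_ONCE(!skb))
        return 0;

    /* ...normal completion processing... */
    return 0;
}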
drivers/net/wireless/ath/ath10k/htt.h

@@ -20,6 +20,7 @@
 
 #include <linux/bug.h>
 #include <linux/interrupt.h>
+#include <linux/dmapool.h>
 
 #include "htc.h"
 #include "rx_desc.h"

@@ -1181,11 +1182,20 @@ struct htt_rx_info {
         u32 info1;
         u32 info2;
     } rate;
+
+    u32 tsf;
     bool fcs_err;
     bool amsdu_more;
     bool mic_err;
 };
 
+struct ath10k_htt_txbuf {
+    struct htt_data_tx_desc_frag frags[2];
+    struct ath10k_htc_hdr htc_hdr;
+    struct htt_cmd_hdr cmd_hdr;
+    struct htt_data_tx_desc cmd_tx;
+} __packed;
+
 struct ath10k_htt {
     struct ath10k *ar;
     enum ath10k_htc_ep_id eid;

@@ -1267,11 +1277,18 @@ struct ath10k_htt {
     struct sk_buff **pending_tx;
     unsigned long *used_msdu_ids; /* bitmap */
     wait_queue_head_t empty_tx_wq;
+    struct dma_pool *tx_pool;
 
     /* set if host-fw communication goes haywire
      * used to avoid further failures */
     bool rx_confused;
     struct tasklet_struct rx_replenish_task;
+
+    /* This is used to group tx/rx completions separately and process them
+     * in batches to reduce cache stalls */
+    struct tasklet_struct txrx_compl_task;
+    struct sk_buff_head tx_compl_q;
+    struct sk_buff_head rx_compl_q;
 };
 
 #define RX_HTT_HDR_STATUS_LEN 64

@@ -1343,4 +1360,5 @@ int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
 int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
 int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
+
 #endif
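The new tx_compl_q/rx_compl_q plus txrx_compl_task implement a common deferral pattern: the message handler only queues the skb and kicks a tasklet, and the tasklet later drains the whole queue in one batch, which is where the "reduce cache stalls" comment comes from. A generic, self-contained sketch of that scheme with hypothetical names:

#include <linux/interrupt.h>
#include <linux/skbuff.h>

struct example_ctx {
    spinlock_t lock;
    struct sk_buff_head compl_q;
    struct tasklet_struct compl_task;
};

static void example_process(struct sk_buff *skb)
{
    /* hypothetical per-skb completion handling */
}

/* Tasklet body: drain everything queued so far in one batch. */
static void example_compl_task(unsigned long ptr)
{
    struct example_ctx *ctx = (struct example_ctx *)ptr;
    struct sk_buff *skb;

    spin_lock_bh(&ctx->lock);
    while ((skb = __skb_dequeue(&ctx->compl_q))) {
        example_process(skb);
        dev_kfree_skb_any(skb);
    }
    spin_unlock_bh(&ctx->lock);
}

/* Fast path: just queue and schedule; no per-message processing here. */
static void example_enqueue(struct example_ctx *ctx, struct sk_buff *skb)
{
    spin_lock_bh(&ctx->lock);
    __skb_queue_tail(&ctx->compl_q, skb);
    spin_unlock_bh(&ctx->lock);
    tasklet_schedule(&ctx->compl_task);
}

static void example_init(struct example_ctx *ctx)
{
    spin_lock_init(&ctx->lock);
    skb_queue_head_init(&ctx->compl_q);
    tasklet_init(&ctx->compl_task, example_compl_task, (unsigned long)ctx);
}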
drivers/net/wireless/ath/ath10k/htt_rx.c

@@ -43,7 +43,7 @@
 
 
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
-
+static void ath10k_htt_txrx_compl_task(unsigned long ptr);
 
 static int ath10k_htt_rx_ring_size(struct ath10k_htt *htt)
 {

@@ -225,18 +225,16 @@ static void ath10k_htt_rx_ring_refill_retry(unsigned long arg)
     ath10k_htt_rx_msdu_buff_replenish(htt);
 }
 
-static unsigned ath10k_htt_rx_ring_elems(struct ath10k_htt *htt)
-{
-    return (__le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr) -
-        htt->rx_ring.sw_rd_idx.msdu_payld) & htt->rx_ring.size_mask;
-}
-
 void ath10k_htt_rx_detach(struct ath10k_htt *htt)
 {
     int sw_rd_idx = htt->rx_ring.sw_rd_idx.msdu_payld;
 
     del_timer_sync(&htt->rx_ring.refill_retry_timer);
     tasklet_kill(&htt->rx_replenish_task);
+    tasklet_kill(&htt->txrx_compl_task);
+
+    skb_queue_purge(&htt->tx_compl_q);
+    skb_queue_purge(&htt->rx_compl_q);
 
     while (sw_rd_idx != __le32_to_cpu(*(htt->rx_ring.alloc_idx.vaddr))) {
         struct sk_buff *skb =

@@ -270,10 +268,12 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
     int idx;
     struct sk_buff *msdu;
 
-    spin_lock_bh(&htt->rx_ring.lock);
+    lockdep_assert_held(&htt->rx_ring.lock);
 
-    if (ath10k_htt_rx_ring_elems(htt) == 0)
-        ath10k_warn("htt rx ring is empty!\n");
+    if (htt->rx_ring.fill_cnt == 0) {
+        ath10k_warn("tried to pop sk_buff from an empty rx ring\n");
+        return NULL;
+    }
 
     idx = htt->rx_ring.sw_rd_idx.msdu_payld;
     msdu = htt->rx_ring.netbufs_ring[idx];

@@ -283,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
     htt->rx_ring.sw_rd_idx.msdu_payld = idx;
     htt->rx_ring.fill_cnt--;
 
-    spin_unlock_bh(&htt->rx_ring.lock);
     return msdu;
 }
 

@@ -307,8 +306,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
     struct sk_buff *msdu;
     struct htt_rx_desc *rx_desc;
 
-    if (ath10k_htt_rx_ring_elems(htt) == 0)
-        ath10k_warn("htt rx ring is empty!\n");
+    lockdep_assert_held(&htt->rx_ring.lock);
 
     if (htt->rx_confused) {
         ath10k_warn("htt is confused. refusing rx\n");

@@ -400,6 +398,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
         msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.info0),
                       RX_MSDU_START_INFO0_MSDU_LENGTH);
         msdu_chained = rx_desc->frag_info.ring2_more_count;
+        msdu_chaining = msdu_chained;
 
         if (msdu_len_invalid)
             msdu_len = 0;

@@ -427,7 +426,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
 
             msdu->next = next;
             msdu = next;
-            msdu_chaining = 1;
         }
 
         last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &

@@ -529,6 +527,12 @@ int ath10k_htt_rx_attach(struct ath10k_htt *htt)
     tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
                  (unsigned long)htt);
 
+    skb_queue_head_init(&htt->tx_compl_q);
+    skb_queue_head_init(&htt->rx_compl_q);
+
+    tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
+                 (unsigned long)htt);
+
     ath10k_dbg(ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
                htt->rx_ring.size, htt->rx_ring.fill_level);
     return 0;

@@ -632,6 +636,12 @@ struct amsdu_subframe_hdr {
     __be16 len;
 } __packed;
 
+static int ath10k_htt_rx_nwifi_hdrlen(struct ieee80211_hdr *hdr)
+{
+    /* nwifi header is padded to 4 bytes. this fixes 4addr rx */
+    return round_up(ieee80211_hdrlen(hdr->frame_control), 4);
+}
+
 static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
                                 struct htt_rx_info *info)
 {

@@ -681,7 +691,7 @@ static void ath10k_htt_rx_amsdu(struct ath10k_htt *htt,
     case RX_MSDU_DECAP_NATIVE_WIFI:
         /* pull decapped header and copy DA */
         hdr = (struct ieee80211_hdr *)skb->data;
-        hdr_len = ieee80211_hdrlen(hdr->frame_control);
+        hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
         memcpy(addr, ieee80211_get_DA(hdr), ETH_ALEN);
         skb_pull(skb, hdr_len);
 

@@ -768,7 +778,7 @@ static void ath10k_htt_rx_msdu(struct ath10k_htt *htt, struct htt_rx_info *info)
     case RX_MSDU_DECAP_NATIVE_WIFI:
         /* Pull decapped header */
         hdr = (struct ieee80211_hdr *)skb->data;
-        hdr_len = ieee80211_hdrlen(hdr->frame_control);
+        hdr_len = ath10k_htt_rx_nwifi_hdrlen(hdr);
         skb_pull(skb, hdr_len);
 
         /* Push original header */

@@ -846,6 +856,20 @@ static bool ath10k_htt_rx_has_mic_err(struct sk_buff *skb)
     return false;
 }
 
+static bool ath10k_htt_rx_is_mgmt(struct sk_buff *skb)
+{
+    struct htt_rx_desc *rxd;
+    u32 flags;
+
+    rxd = (void *)skb->data - sizeof(*rxd);
+    flags = __le32_to_cpu(rxd->attention.flags);
+
+    if (flags & RX_ATTENTION_FLAGS_MGMT_TYPE)
+        return true;
+
+    return false;
+}
+
 static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
 {
     struct htt_rx_desc *rxd;

@@ -877,6 +901,57 @@ static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
     return CHECKSUM_UNNECESSARY;
 }
 
+static int ath10k_unchain_msdu(struct sk_buff *msdu_head)
+{
+    struct sk_buff *next = msdu_head->next;
+    struct sk_buff *to_free = next;
+    int space;
+    int total_len = 0;
+
+    /* TODO:  Might could optimize this by using
+     * skb_try_coalesce or similar method to
+     * decrease copying, or maybe get mac80211 to
+     * provide a way to just receive a list of
+     * skb?
+     */
+
+    msdu_head->next = NULL;
+
+    /* Allocate total length all at once. */
+    while (next) {
+        total_len += next->len;
+        next = next->next;
+    }
+
+    space = total_len - skb_tailroom(msdu_head);
+    if ((space > 0) &&
+        (pskb_expand_head(msdu_head, 0, space, GFP_ATOMIC) < 0)) {
+        /* TODO:  bump some rx-oom error stat */
+        /* put it back together so we can free the
+         * whole list at once.
+         */
+        msdu_head->next = to_free;
+        return -1;
+    }
+
+    /* Walk list again, copying contents into
+     * msdu_head
+     */
+    next = to_free;
+    while (next) {
+        skb_copy_from_linear_data(next, skb_put(msdu_head, next->len),
                                  next->len);
+        next = next->next;
+    }
+
+    /* If here, we have consolidated skb.  Free the
+     * fragments and pass the main skb on up the
+     * stack.
+     */
+    ath10k_htt_rx_free_msdu_chain(to_free);
+    return 0;
+}
+
 static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
                                   struct htt_rx_indication *rx)
 {

@@ -888,6 +963,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
     u8 *fw_desc;
     int i, j;
 
+    lockdep_assert_held(&htt->rx_ring.lock);
+
     memset(&info, 0, sizeof(info));
 
     fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);

@@ -940,7 +1017,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
         status = info.status;
 
         /* Skip mgmt frames while we handle this in WMI */
-        if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL) {
+        if (status == HTT_RX_IND_MPDU_STATUS_MGMT_CTRL ||
+            ath10k_htt_rx_is_mgmt(msdu_head)) {
             ath10k_dbg(ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
             ath10k_htt_rx_free_msdu_chain(msdu_head);
             continue;

@@ -964,10 +1042,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
             continue;
         }
 
-        /* FIXME: we do not support chaining yet.
-         * this needs investigation */
-        if (msdu_chaining) {
-            ath10k_warn("htt rx msdu_chaining is true\n");
+        if (msdu_chaining &&
+            (ath10k_unchain_msdu(msdu_head) < 0)) {
             ath10k_htt_rx_free_msdu_chain(msdu_head);
             continue;
         }

@@ -990,6 +1066,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
         info.rate.info0 = rx->ppdu.info0;
         info.rate.info1 = __le32_to_cpu(rx->ppdu.info1);
         info.rate.info2 = __le32_to_cpu(rx->ppdu.info2);
+        info.tsf = __le32_to_cpu(rx->ppdu.tsf);
 
         hdr = ath10k_htt_rx_skb_get_hdr(msdu_head);
 

@@ -1023,8 +1100,11 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
 
     msdu_head = NULL;
     msdu_tail = NULL;
+
+    spin_lock_bh(&htt->rx_ring.lock);
     msdu_chaining = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
                                             &msdu_head, &msdu_tail);
+    spin_unlock_bh(&htt->rx_ring.lock);
 
     ath10k_dbg(ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
 

@@ -1116,6 +1196,45 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
     }
 }
 
+static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+                                       struct sk_buff *skb)
+{
+    struct ath10k_htt *htt = &ar->htt;
+    struct htt_resp *resp = (struct htt_resp *)skb->data;
+    struct htt_tx_done tx_done = {};
+    int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+    __le16 msdu_id;
+    int i;
+
+    lockdep_assert_held(&htt->tx_lock);
+
+    switch (status) {
+    case HTT_DATA_TX_STATUS_NO_ACK:
+        tx_done.no_ack = true;
+        break;
+    case HTT_DATA_TX_STATUS_OK:
+        break;
+    case HTT_DATA_TX_STATUS_DISCARD:
+    case HTT_DATA_TX_STATUS_POSTPONE:
+    case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+        tx_done.discard = true;
+        break;
+    default:
+        ath10k_warn("unhandled tx completion status %d\n", status);
+        tx_done.discard = true;
+        break;
+    }
+
+    ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+               resp->data_tx_completion.num_msdus);
+
+    for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+        msdu_id = resp->data_tx_completion.msdus[i];
+        tx_done.msdu_id = __le16_to_cpu(msdu_id);
+        ath10k_txrx_tx_unref(htt, &tx_done);
+    }
+}
+
 void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
 {
     struct ath10k_htt *htt = &ar->htt;

@@ -1134,10 +1253,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
         complete(&htt->target_version_received);
         break;
     }
-    case HTT_T2H_MSG_TYPE_RX_IND: {
-        ath10k_htt_rx_handler(htt, &resp->rx_ind);
-        break;
-    }
+    case HTT_T2H_MSG_TYPE_RX_IND:
+        spin_lock_bh(&htt->rx_ring.lock);
+        __skb_queue_tail(&htt->rx_compl_q, skb);
+        spin_unlock_bh(&htt->rx_ring.lock);
+        tasklet_schedule(&htt->txrx_compl_task);
+        return;
     case HTT_T2H_MSG_TYPE_PEER_MAP: {
         struct htt_peer_map_event ev = {
             .vdev_id = resp->peer_map.vdev_id,

@@ -1172,44 +1293,17 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
             break;
         }
 
+        spin_lock_bh(&htt->tx_lock);
         ath10k_txrx_tx_unref(htt, &tx_done);
+        spin_unlock_bh(&htt->tx_lock);
         break;
     }
-    case HTT_T2H_MSG_TYPE_TX_COMPL_IND: {
-        struct htt_tx_done tx_done = {};
-        int status = MS(resp->data_tx_completion.flags,
-                        HTT_DATA_TX_STATUS);
-        __le16 msdu_id;
-        int i;
-
-        switch (status) {
-        case HTT_DATA_TX_STATUS_NO_ACK:
-            tx_done.no_ack = true;
-            break;
-        case HTT_DATA_TX_STATUS_OK:
-            break;
-        case HTT_DATA_TX_STATUS_DISCARD:
-        case HTT_DATA_TX_STATUS_POSTPONE:
-        case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
-            tx_done.discard = true;
-            break;
-        default:
-            ath10k_warn("unhandled tx completion status %d\n",
-                        status);
-            tx_done.discard = true;
-            break;
-        }
-
-        ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
-                   resp->data_tx_completion.num_msdus);
-
-        for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
-            msdu_id = resp->data_tx_completion.msdus[i];
-            tx_done.msdu_id = __le16_to_cpu(msdu_id);
-            ath10k_txrx_tx_unref(htt, &tx_done);
-        }
-        break;
-    }
+    case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+        spin_lock_bh(&htt->tx_lock);
+        __skb_queue_tail(&htt->tx_compl_q, skb);
+        spin_unlock_bh(&htt->tx_lock);
+        tasklet_schedule(&htt->txrx_compl_task);
+        return;
     case HTT_T2H_MSG_TYPE_SEC_IND: {
         struct ath10k *ar = htt->ar;
         struct htt_security_indication *ev = &resp->security_indication;

@@ -1249,3 +1343,25 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
     /* Free the indication buffer */
     dev_kfree_skb_any(skb);
 }
+
+static void ath10k_htt_txrx_compl_task(unsigned long ptr)
+{
+    struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
+    struct htt_resp *resp;
+    struct sk_buff *skb;
+
+    spin_lock_bh(&htt->tx_lock);
+    while ((skb = __skb_dequeue(&htt->tx_compl_q))) {
+        ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+        dev_kfree_skb_any(skb);
+    }
+    spin_unlock_bh(&htt->tx_lock);
+
+    spin_lock_bh(&htt->rx_ring.lock);
+    while ((skb = __skb_dequeue(&htt->rx_compl_q))) {
+        resp = (struct htt_resp *)skb->data;
+        ath10k_htt_rx_handler(htt, &resp->rx_ind);
+        dev_kfree_skb_any(skb);
+    }
+    spin_unlock_bh(&htt->rx_ring.lock);
+}
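A recurring change in this file is moving rx_ring.lock acquisition out to the callers and documenting the requirement with lockdep_assert_held(), which compiles away on non-lockdep builds. A condensed sketch of the resulting pop-style helper, using a hypothetical ring type rather than the driver's:

#include <linux/lockdep.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical ring for illustration only. */
struct example_ring {
    spinlock_t lock;
    int fill_cnt;
    unsigned int sw_rd_idx;
    unsigned int size_mask;
    struct sk_buff **bufs;
};

static struct sk_buff *example_ring_pop(struct example_ring *ring)
{
    lockdep_assert_held(&ring->lock);   /* caller must hold ring->lock */

    if (ring->fill_cnt == 0)
        return NULL;

    ring->fill_cnt--;
    return ring->bufs[ring->sw_rd_idx++ & ring->size_mask];
}

Pushing the lock to the caller lets one critical section cover a whole batch of pops, which is exactly what the amsdu_pop call sites above now do.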
drivers/net/wireless/ath/ath10k/htt_tx.c

@@ -109,6 +109,14 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
         return -ENOMEM;
     }
 
+    htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
+                                   sizeof(struct ath10k_htt_txbuf), 4, 0);
+    if (!htt->tx_pool) {
+        kfree(htt->used_msdu_ids);
+        kfree(htt->pending_tx);
+        return -ENOMEM;
+    }
+
     return 0;
 }
 

@@ -117,9 +125,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
     struct htt_tx_done tx_done = {0};
     int msdu_id;
 
-    /* No locks needed. Called after communication with the device has
-     * been stopped. */
-
+    spin_lock_bh(&htt->tx_lock);
     for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
         if (!test_bit(msdu_id, htt->used_msdu_ids))
             continue;

@@ -132,6 +138,7 @@ static void ath10k_htt_tx_cleanup_pending(struct ath10k_htt *htt)
 
         ath10k_txrx_tx_unref(htt, &tx_done);
     }
+    spin_unlock_bh(&htt->tx_lock);
 }
 
 void ath10k_htt_tx_detach(struct ath10k_htt *htt)

@@ -139,6 +146,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
     ath10k_htt_tx_cleanup_pending(htt);
     kfree(htt->pending_tx);
     kfree(htt->used_msdu_ids);
+    dma_pool_destroy(htt->tx_pool);
     return;
 }
 

@@ -334,7 +342,9 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
         goto err_free_msdu_id;
     }
 
-    res = ath10k_skb_map(dev, msdu);
+    skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+                                   DMA_TO_DEVICE);
+    res = dma_mapping_error(dev, skb_cb->paddr);
     if (res)
         goto err_free_txdesc;
 

@@ -348,8 +358,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
     memcpy(cmd->mgmt_tx.hdr, msdu->data,
            min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
 
-    skb_cb->htt.frag_len = 0;
-    skb_cb->htt.pad_len = 0;
+    skb_cb->htt.txbuf = NULL;
 
     res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
     if (res)

@@ -358,7 +367,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
     return 0;
 
 err_unmap_msdu:
-    ath10k_skb_unmap(dev, msdu);
+    dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 err_free_txdesc:
     dev_kfree_skb_any(txdesc);
 err_free_msdu_id:

@@ -375,19 +384,19 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
 {
     struct device *dev = htt->ar->dev;
-    struct htt_cmd *cmd;
-    struct htt_data_tx_desc_frag *tx_frags;
     struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
     struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
-    struct sk_buff *txdesc = NULL;
-    bool use_frags;
-    u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
-    u8 tid;
-    int prefetch_len, desc_len;
-    int msdu_id = -1;
+    struct ath10k_hif_sg_item sg_items[2];
+    struct htt_data_tx_desc_frag *frags;
+    u8 vdev_id = skb_cb->vdev_id;
+    u8 tid = skb_cb->htt.tid;
+    int prefetch_len;
     int res;
-    u8 flags0;
-    u16 flags1;
+    u8 flags0 = 0;
+    u16 msdu_id, flags1 = 0;
+    dma_addr_t paddr;
+    u32 frags_paddr;
+    bool use_frags;
 
     res = ath10k_htt_tx_inc_pending(htt);
     if (res)

@@ -406,114 +415,120 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
     prefetch_len = min(htt->prefetch_len, msdu->len);
     prefetch_len = roundup(prefetch_len, 4);
 
-    desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;
-
-    txdesc = ath10k_htc_alloc_skb(desc_len);
-    if (!txdesc) {
-        res = -ENOMEM;
-        goto err_free_msdu_id;
-    }
-
     /* Since HTT 3.0 there is no separate mgmt tx command. However in case
      * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx
      * fragment list host driver specifies directly frame pointer. */
     use_frags = htt->target_version_major < 3 ||
                 !ieee80211_is_mgmt(hdr->frame_control);
 
-    if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
-        ath10k_warn("htt alignment check failed. dropping packet.\n");
-        res = -EIO;
-        goto err_free_txdesc;
-    }
+    skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
+                                       &paddr);
+    if (!skb_cb->htt.txbuf)
+        goto err_free_msdu_id;
+    skb_cb->htt.txbuf_paddr = paddr;
 
-    if (use_frags) {
-        skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
-        skb_cb->htt.pad_len = (unsigned long)msdu->data -
-                              round_down((unsigned long)msdu->data, 4);
-
-        skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
-    } else {
-        skb_cb->htt.frag_len = 0;
-        skb_cb->htt.pad_len = 0;
-    }
-
-    res = ath10k_skb_map(dev, msdu);
+    skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+                                   DMA_TO_DEVICE);
+    res = dma_mapping_error(dev, skb_cb->paddr);
     if (res)
-        goto err_pull_txfrag;
+        goto err_free_txbuf;
 
-    if (use_frags) {
-        dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
-                                DMA_TO_DEVICE);
+    if (likely(use_frags)) {
+        frags = skb_cb->htt.txbuf->frags;
 
-        /* tx fragment list must be terminated with zero-entry */
-        tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
-        tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
-                                          skb_cb->htt.frag_len +
-                                          skb_cb->htt.pad_len);
-        tx_frags[0].len = __cpu_to_le32(msdu->len -
-                                        skb_cb->htt.frag_len -
-                                        skb_cb->htt.pad_len);
-        tx_frags[1].paddr = __cpu_to_le32(0);
-        tx_frags[1].len = __cpu_to_le32(0);
-
-        dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
-                                   DMA_TO_DEVICE);
-    }
-
-    ath10k_dbg(ATH10K_DBG_HTT, "tx-msdu 0x%llx\n",
-               (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
-    ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "tx-msdu: ",
-                    msdu->data, msdu->len);
-
-    skb_put(txdesc, desc_len);
-    cmd = (struct htt_cmd *)txdesc->data;
-
-    tid = ATH10K_SKB_CB(msdu)->htt.tid;
-
-    ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);
-
-    flags0 = 0;
-    if (!ieee80211_has_protected(hdr->frame_control))
-        flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
-    flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
-
-    if (use_frags)
+        frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
+        frags[0].len = __cpu_to_le32(msdu->len);
+        frags[1].paddr = 0;
+        frags[1].len = 0;
+
         flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                      HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
-    else
+
+        frags_paddr = skb_cb->htt.txbuf_paddr;
+    } else {
         flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                      HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
 
-    flags1 = 0;
+        frags_paddr = skb_cb->paddr;
+    }
+
+    /* Normally all commands go through HTC which manages tx credits for
+     * each endpoint and notifies when tx is completed.
+     *
+     * HTT endpoint is creditless so there's no need to care about HTC
+     * flags. In that case it is trivial to fill the HTC header here.
+     *
+     * MSDU transmission is considered completed upon HTT event. This
+     * implies no relevant resources can be freed until after the event is
+     * received. That's why HTC tx completion handler itself is ignored by
+     * setting NULL to transfer_context for all sg items.
+     *
+     * There is simply no point in pushing HTT TX_FRM through HTC tx path
+     * as it's a waste of resources. By bypassing HTC it is possible to
+     * avoid extra memory allocations, compress data structures and thus
+     * improve performance. */
+
+    skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
+    skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
+            sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+            sizeof(skb_cb->htt.txbuf->cmd_tx) +
+            prefetch_len);
+    skb_cb->htt.txbuf->htc_hdr.flags = 0;
+
+    if (!ieee80211_has_protected(hdr->frame_control))
+        flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+    flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
     flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
     flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
     flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
     flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
 
-    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
-    cmd->data_tx.flags0 = flags0;
-    cmd->data_tx.flags1 = __cpu_to_le16(flags1);
-    cmd->data_tx.len = __cpu_to_le16(msdu->len -
-                                     skb_cb->htt.frag_len -
-                                     skb_cb->htt.pad_len);
-    cmd->data_tx.id = __cpu_to_le16(msdu_id);
-    cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
-    cmd->data_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
+    skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+    skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
+    skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+    skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+    skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+    skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+    skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
 
-    memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);
+    ath10k_dbg(ATH10K_DBG_HTT,
+               "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
+               flags0, flags1, msdu->len, msdu_id, frags_paddr,
+               (u32)skb_cb->paddr, vdev_id, tid);
+    ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+                    msdu->data, msdu->len);
 
-    res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+    sg_items[0].transfer_id = 0;
+    sg_items[0].transfer_context = NULL;
+    sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
+    sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
+                        sizeof(skb_cb->htt.txbuf->frags);
+    sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
+                      sizeof(skb_cb->htt.txbuf->cmd_hdr) +
+                      sizeof(skb_cb->htt.txbuf->cmd_tx);
+
+    sg_items[1].transfer_id = 0;
+    sg_items[1].transfer_context = NULL;
+    sg_items[1].vaddr = msdu->data;
+    sg_items[1].paddr = skb_cb->paddr;
+    sg_items[1].len = prefetch_len;
+
+    res = ath10k_hif_tx_sg(htt->ar,
+                           htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+                           sg_items, ARRAY_SIZE(sg_items));
     if (res)
         goto err_unmap_msdu;
 
     return 0;
 
 err_unmap_msdu:
-    ath10k_skb_unmap(dev, msdu);
-err_pull_txfrag:
-    skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
-err_free_txdesc:
-    dev_kfree_skb_any(txdesc);
+    dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txbuf:
+    dma_pool_free(htt->tx_pool,
                  skb_cb->htt.txbuf,
                  skb_cb->htt.txbuf_paddr);
 err_free_msdu_id:
     spin_lock_bh(&htt->tx_lock);
     htt->pending_tx[msdu_id] = NULL;
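The tx path now draws its per-packet HTT descriptors from a DMA pool instead of allocating an skb per frame through HTC. A compact sketch of the dma_pool lifecycle used above, with example_txbuf as a hypothetical stand-in for struct ath10k_htt_txbuf:

#include <linux/dmapool.h>

struct example_txbuf {
    u8 desc[32];    /* placeholder descriptor contents */
};

static int example_pool_demo(struct device *dev)
{
    struct dma_pool *pool;
    struct example_txbuf *buf;
    dma_addr_t paddr;

    /* one pool per device: fixed-size, 4-byte-aligned buffers */
    pool = dma_pool_create("example tx pool", dev, sizeof(*buf), 4, 0);
    if (!pool)
        return -ENOMEM;

    buf = dma_pool_alloc(pool, GFP_ATOMIC, &paddr);    /* per packet */
    if (buf) {
        /* ...fill buf, hand paddr to the device, wait for completion... */
        dma_pool_free(pool, buf, paddr);
    }

    dma_pool_destroy(pool);    /* teardown, after all buffers are freed */
    return 0;
}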
@ -323,13 +323,15 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
|
|||
|
||||
ret = ath10k_wmi_peer_create(ar, vdev_id, addr);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to create wmi peer: %i\n", ret);
|
||||
ath10k_warn("Failed to create wmi peer %pM on vdev %i: %i\n",
|
||||
addr, vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to wait for created wmi peer: %i\n", ret);
|
||||
ath10k_warn("Failed to wait for created wmi peer %pM on vdev %i: %i\n",
|
||||
addr, vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
|
@ -349,7 +351,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
|
|||
ret = ath10k_wmi_pdev_set_param(ar, param,
|
||||
ATH10K_KICKOUT_THRESHOLD);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set kickout threshold: %d\n", ret);
|
||||
ath10k_warn("Failed to set kickout threshold on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -357,8 +360,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
|
|||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
|
||||
ATH10K_KEEPALIVE_MIN_IDLE);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set keepalive minimum idle time : %d\n",
|
||||
ret);
|
||||
ath10k_warn("Failed to set keepalive minimum idle time on vdev %i : %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -366,8 +369,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
|
|||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
|
||||
ATH10K_KEEPALIVE_MAX_IDLE);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set keepalive maximum idle time: %d\n",
|
||||
ret);
|
||||
ath10k_warn("Failed to set keepalive maximum idle time on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -375,8 +378,8 @@ static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
|
|||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
|
||||
ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set keepalive maximum unresponsive time: %d\n",
|
||||
ret);
|
||||
ath10k_warn("Failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -529,13 +532,15 @@ static int ath10k_vdev_start(struct ath10k_vif *arvif)
|
|||
|
||||
ret = ath10k_wmi_vdev_start(ar, &arg);
|
||||
if (ret) {
|
||||
ath10k_warn("WMI vdev start failed: ret %d\n", ret);
|
||||
ath10k_warn("WMI vdev %i start failed: ret %d\n",
|
||||
arg.vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_vdev_setup_sync(ar);
|
||||
if (ret) {
|
||||
ath10k_warn("vdev setup failed %d\n", ret);
|
||||
ath10k_warn("vdev %i setup failed %d\n",
|
||||
arg.vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -553,13 +558,15 @@ static int ath10k_vdev_stop(struct ath10k_vif *arvif)
|
|||
|
||||
ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
|
||||
if (ret) {
|
||||
ath10k_warn("WMI vdev stop failed: ret %d\n", ret);
|
||||
ath10k_warn("WMI vdev %i stop failed: ret %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_vdev_setup_sync(ar);
|
||||
if (ret) {
|
||||
ath10k_warn("vdev setup failed %d\n", ret);
|
||||
ath10k_warn("vdev %i setup sync failed %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -597,19 +604,22 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
|
|||
|
||||
ret = ath10k_wmi_vdev_start(ar, &arg);
|
||||
if (ret) {
|
||||
ath10k_warn("Monitor vdev start failed: ret %d\n", ret);
|
||||
ath10k_warn("Monitor vdev %i start failed: ret %d\n",
|
||||
vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_vdev_setup_sync(ar);
|
||||
if (ret) {
|
||||
ath10k_warn("Monitor vdev setup failed %d\n", ret);
|
||||
ath10k_warn("Monitor vdev %i setup failed %d\n",
|
||||
vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
|
||||
if (ret) {
|
||||
ath10k_warn("Monitor vdev up failed: %d\n", ret);
|
||||
ath10k_warn("Monitor vdev %i up failed: %d\n",
|
||||
vdev_id, ret);
|
||||
goto vdev_stop;
|
||||
}
|
||||
|
||||
|
@ -621,7 +631,8 @@ static int ath10k_monitor_start(struct ath10k *ar, int vdev_id)
|
|||
vdev_stop:
|
||||
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
|
||||
if (ret)
|
||||
ath10k_warn("Monitor vdev stop failed: %d\n", ret);
|
||||
ath10k_warn("Monitor vdev %i stop failed: %d\n",
|
||||
ar->monitor_vdev_id, ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -644,15 +655,18 @@ static int ath10k_monitor_stop(struct ath10k *ar)
|
|||
|
||||
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
|
||||
if (ret)
|
||||
ath10k_warn("Monitor vdev down failed: %d\n", ret);
|
||||
ath10k_warn("Monitor vdev %i down failed: %d\n",
|
||||
ar->monitor_vdev_id, ret);
|
||||
|
||||
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
|
||||
if (ret)
|
||||
ath10k_warn("Monitor vdev stop failed: %d\n", ret);
|
||||
ath10k_warn("Monitor vdev %i stop failed: %d\n",
|
||||
ar->monitor_vdev_id, ret);
|
||||
|
||||
ret = ath10k_vdev_setup_sync(ar);
|
||||
if (ret)
|
||||
ath10k_warn("Monitor_down sync failed: %d\n", ret);
|
||||
ath10k_warn("Monitor_down sync failed, vdev %i: %d\n",
|
||||
ar->monitor_vdev_id, ret);
|
||||
|
||||
ar->monitor_enabled = false;
|
||||
return ret;
|
||||
|
@ -682,7 +696,8 @@ static int ath10k_monitor_create(struct ath10k *ar)
|
|||
WMI_VDEV_TYPE_MONITOR,
|
||||
0, ar->mac_addr);
|
||||
if (ret) {
|
||||
ath10k_warn("WMI vdev monitor create failed: ret %d\n", ret);
|
||||
ath10k_warn("WMI vdev %i monitor create failed: ret %d\n",
|
||||
ar->monitor_vdev_id, ret);
|
||||
goto vdev_fail;
|
||||
}
|
||||
|
||||
|
@ -711,7 +726,8 @@ static int ath10k_monitor_destroy(struct ath10k *ar)
|
|||
|
||||
ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
|
||||
if (ret) {
|
||||
ath10k_warn("WMI vdev monitor delete failed: %d\n", ret);
|
||||
ath10k_warn("WMI vdev %i monitor delete failed: %d\n",
|
||||
ar->monitor_vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -839,7 +855,9 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
|
|||
|
||||
spin_lock_bh(&arvif->ar->data_lock);
|
||||
if (arvif->beacon) {
|
||||
ath10k_skb_unmap(arvif->ar->dev, arvif->beacon);
|
||||
dma_unmap_single(arvif->ar->dev,
|
||||
ATH10K_SKB_CB(arvif->beacon)->paddr,
|
||||
arvif->beacon->len, DMA_TO_DEVICE);
|
||||
dev_kfree_skb_any(arvif->beacon);
|
||||
|
||||
arvif->beacon = NULL;
|
||||
|
@ -862,8 +880,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
|
|||
ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
|
||||
arvif->bssid);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to bring up VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
ath10k_warn("Failed to bring up vdev %d: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
ath10k_vdev_stop(arvif);
|
||||
return;
|
||||
}
|
||||
|
@ -943,8 +961,8 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
|
|||
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
|
||||
conf->dynamic_ps_timeout);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
ath10k_warn("Failed to set inactivity time for vdev %d: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
|
@ -1196,8 +1214,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
|
|||
WMI_AP_PS_PEER_PARAM_UAPSD,
|
||||
uapsd);
|
||||
if (ret) {
|
||||
ath10k_warn("failed to set ap ps peer param uapsd: %d\n",
|
||||
ret);
|
||||
ath10k_warn("failed to set ap ps peer param uapsd for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1206,8 +1224,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
|
|||
WMI_AP_PS_PEER_PARAM_MAX_SP,
|
||||
max_sp);
|
||||
if (ret) {
|
||||
ath10k_warn("failed to set ap ps peer param max sp: %d\n",
|
||||
ret);
|
||||
ath10k_warn("failed to set ap ps peer param max sp for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1218,8 +1236,8 @@ static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
|
|||
ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
|
||||
WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 10);
|
||||
if (ret) {
|
||||
ath10k_warn("failed to set ap ps peer param ageout time: %d\n",
|
||||
ret);
|
||||
ath10k_warn("failed to set ap ps peer param ageout time for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -1411,8 +1429,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
|
|||
|
||||
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
|
||||
if (!ap_sta) {
|
||||
ath10k_warn("Failed to find station entry for %pM\n",
|
||||
bss_conf->bssid);
|
||||
ath10k_warn("Failed to find station entry for %pM, vdev %i\n",
|
||||
bss_conf->bssid, arvif->vdev_id);
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
|
@ -1424,8 +1442,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
|
|||
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
|
||||
bss_conf, &peer_arg);
|
||||
if (ret) {
|
||||
ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
|
||||
bss_conf->bssid, ret);
|
||||
ath10k_warn("Peer assoc prepare failed for %pM vdev %i\n: %d",
|
||||
bss_conf->bssid, arvif->vdev_id, ret);
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
|
@ -1434,14 +1452,15 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
|
|||
|
||||
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
|
||||
if (ret) {
|
||||
ath10k_warn("Peer assoc failed for %pM\n: %d",
|
||||
bss_conf->bssid, ret);
|
||||
ath10k_warn("Peer assoc failed for %pM vdev %i\n: %d",
|
||||
bss_conf->bssid, arvif->vdev_id, ret);
|
||||
return;
|
||||
}
|
||||
|
||||
ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
|
||||
if (ret) {
|
||||
ath10k_warn("failed to setup peer SMPS: %d\n", ret);
|
||||
ath10k_warn("failed to setup peer SMPS for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -1514,34 +1533,35 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
|
|||
|
||||
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
|
||||
if (ret) {
|
||||
ath10k_warn("WMI peer assoc prepare failed for %pM\n",
|
||||
sta->addr);
|
||||
ath10k_warn("WMI peer assoc prepare failed for %pM vdev %i: %i\n",
|
||||
sta->addr, arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
|
||||
if (ret) {
|
||||
ath10k_warn("Peer assoc failed for STA %pM\n: %d",
|
||||
sta->addr, ret);
|
||||
ath10k_warn("Peer assoc failed for STA %pM vdev %i: %d\n",
|
||||
sta->addr, arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
|
||||
if (ret) {
|
||||
ath10k_warn("failed to setup peer SMPS: %d\n", ret);
|
||||
ath10k_warn("failed to setup peer SMPS for vdev: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
|
||||
if (ret) {
|
||||
ath10k_warn("could not install peer wep keys (%d)\n", ret);
|
||||
ath10k_warn("could not install peer wep keys for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
|
||||
if (ret) {
|
||||
ath10k_warn("could not set qos params for STA %pM, %d\n",
|
||||
sta->addr, ret);
|
||||
ath10k_warn("could not set qos params for STA %pM for vdev %i: %d\n",
|
||||
sta->addr, arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1557,7 +1577,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
|
|||
|
||||
ret = ath10k_clear_peer_keys(arvif, sta->addr);
|
||||
if (ret) {
|
||||
ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
|
||||
ath10k_warn("could not clear all peer wep keys for vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -2524,7 +2545,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
|
||||
arvif->vdev_subtype, vif->addr);
|
||||
if (ret) {
|
||||
ath10k_warn("WMI vdev create failed: ret %d\n", ret);
|
||||
ath10k_warn("WMI vdev %i create failed: ret %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@ -2535,7 +2557,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
|
||||
arvif->def_wep_key_idx);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set default keyid: %d\n", ret);
|
||||
ath10k_warn("Failed to set vdev %i default keyid: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_vdev_delete;
|
||||
}
|
||||
|
||||
|
@ -2544,21 +2567,23 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
|||
ATH10K_HW_TXRX_NATIVE_WIFI);
|
||||
/* 10.X firmware does not support this VDEV parameter. Do not warn */
|
||||
if (ret && ret != -EOPNOTSUPP) {
|
||||
ath10k_warn("Failed to set TX encap: %d\n", ret);
|
||||
ath10k_warn("Failed to set vdev %i TX encap: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_vdev_delete;
|
||||
}
|
||||
|
||||
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
|
||||
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to create peer for AP: %d\n", ret);
|
||||
ath10k_warn("Failed to create vdev %i peer for AP: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_vdev_delete;
|
||||
}
|
||||
|
||||
ret = ath10k_mac_set_kickout(arvif);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set kickout parameters: %d\n",
|
||||
ret);
|
||||
ath10k_warn("Failed to set vdev %i kickout parameters: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_peer_delete;
|
||||
}
|
||||
}
|
||||
|
@ -2569,7 +2594,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
|
||||
param, value);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set RX wake policy: %d\n", ret);
|
||||
ath10k_warn("Failed to set vdev %i RX wake policy: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_peer_delete;
|
||||
}
|
||||
|
||||
|
@ -2578,7 +2604,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
|
||||
param, value);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
|
||||
ath10k_warn("Failed to set vdev %i TX wake thresh: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_peer_delete;
|
||||
}
|
||||
|
||||
|
@ -2587,7 +2614,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
|
||||
param, value);
|
||||
if (ret) {
|
||||
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
|
||||
ath10k_warn("Failed to set vdev %i PSPOLL count: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto err_peer_delete;
|
||||
}
|
||||
}
|
||||
|
@ -2651,17 +2679,19 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
|
|||
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
|
||||
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to remove peer for AP: %d\n", ret);
|
||||
ath10k_warn("Failed to remove peer for AP vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
|
||||
kfree(arvif->u.ap.noa_data);
|
||||
}
|
||||
|
||||
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev delete %d (remove interface)\n",
|
||||
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
|
||||
arvif->vdev_id);
|
||||
|
||||
ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
|
||||
if (ret)
|
||||
ath10k_warn("WMI vdev delete failed: %d\n", ret);
|
||||
ath10k_warn("WMI vdev %i delete failed: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
|
||||
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
|
||||
ar->monitor_present = false;
|
||||
|
@ -2750,8 +2780,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||
arvif->vdev_id, arvif->beacon_interval);
|
||||
|
||||
if (ret)
|
||||
ath10k_warn("Failed to set beacon interval for VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
ath10k_warn("Failed to set beacon interval for vdev %d: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
}
|
||||
|
||||
if (changed & BSS_CHANGED_BEACON) {
|
||||
|
@ -2763,8 +2793,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
|
||||
WMI_BEACON_STAGGERED_MODE);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
ath10k_warn("Failed to set beacon mode for vdev %d: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
}
|
||||
|
||||
if (changed & BSS_CHANGED_BEACON_INFO) {
|
||||
|
@ -2778,8 +2808,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
|
||||
arvif->dtim_period);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
ath10k_warn("Failed to set dtim period for vdev %d: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
}
|
||||
|
||||
if (changed & BSS_CHANGED_SSID &&
|
||||
|
@ -2799,7 +2829,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||
ret = ath10k_peer_create(ar, arvif->vdev_id,
|
||||
info->bssid);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to add peer %pM for vdev %d when changin bssid: %i\n",
|
||||
ath10k_warn("Failed to add peer %pM for vdev %d when changing bssid: %i\n",
|
||||
info->bssid, arvif->vdev_id, ret);
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_STATION) {
|
||||
|
@ -2815,8 +2845,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||
|
||||
ret = ath10k_vdev_start(arvif);
|
||||
if (ret) {
|
||||
ath10k_warn("failed to start vdev: %d\n",
|
||||
ret);
|
||||
ath10k_warn("failed to start vdev %i: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
|
@ -2851,8 +2881,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
|
||||
cts_prot);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
ath10k_warn("Failed to set CTS prot for vdev %d: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
}
|
||||
|
||||
if (changed & BSS_CHANGED_ERP_SLOT) {
|
||||
|
@ -2870,8 +2900,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
|
||||
slottime);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
ath10k_warn("Failed to set erp slot for vdev %d: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
}
|
||||
|
||||
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
|
||||
|
@ -2889,8 +2919,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
|
|||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
|
||||
preamble);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to set preamble for VDEV: %d\n",
|
||||
arvif->vdev_id);
|
||||
ath10k_warn("Failed to set preamble for vdev %d: %i\n",
|
||||
arvif->vdev_id, ret);
|
||||
}
|
||||
|
||||
if (changed & BSS_CHANGED_ASSOC) {
|
||||
|
@ -3021,8 +3051,8 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
|
|||
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
|
||||
key->keyidx);
|
||||
if (ret)
|
||||
ath10k_warn("failed to set group key as default key: %d\n",
|
||||
ret);
|
||||
ath10k_warn("failed to set vdev %i group key as default key: %d\n",
|
||||
arvif->vdev_id, ret);
|
||||
}
|
||||
|
||||
static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
||||
|
@ -3082,7 +3112,8 @@ static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
|
||||
ret = ath10k_install_key(arvif, key, cmd, peer_addr);
|
||||
if (ret) {
|
||||
ath10k_warn("ath10k_install_key failed (%d)\n", ret);
|
||||
ath10k_warn("key installation failed for vdev %i peer %pM: %d\n",
|
||||
arvif->vdev_id, peer_addr, ret);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
|
@ -3179,6 +3210,13 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|||
int max_num_peers;
|
||||
int ret = 0;
|
||||
|
||||
if (old_state == IEEE80211_STA_NOTEXIST &&
|
||||
new_state == IEEE80211_STA_NONE) {
|
||||
memset(arsta, 0, sizeof(*arsta));
|
||||
arsta->arvif = arvif;
|
||||
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
|
||||
}
|
||||
|
||||
/* cancel must be done outside the mutex to avoid deadlock */
|
||||
if ((old_state == IEEE80211_STA_NONE &&
|
||||
new_state == IEEE80211_STA_NOTEXIST))
|
||||
|
@ -3208,10 +3246,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|||
"mac vdev %d peer create %pM (new sta) num_peers %d\n",
|
||||
arvif->vdev_id, sta->addr, ar->num_peers);
|
||||
|
||||
memset(arsta, 0, sizeof(*arsta));
|
||||
arsta->arvif = arvif;
|
||||
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
|
||||
|
||||
ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to add peer %pM for vdev %d when adding a new sta: %i\n",
|
||||
|
@ -3226,8 +3260,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
|
|||
arvif->vdev_id, sta->addr);
|
||||
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
|
||||
if (ret)
|
||||
ath10k_warn("Failed to delete peer: %pM for VDEV: %d\n",
|
||||
sta->addr, arvif->vdev_id);
|
||||
ath10k_warn("Failed to delete peer %pM for vdev %d: %i\n",
|
||||
sta->addr, arvif->vdev_id, ret);
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_STATION)
|
||||
ath10k_bss_disassoc(hw, vif);
|
||||
|
@@ -3243,8 +3277,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
         ret = ath10k_station_assoc(ar, arvif, sta);
         if (ret)
-            ath10k_warn("Failed to associate station: %pM\n",
-                        sta->addr);
+            ath10k_warn("Failed to associate station %pM for vdev %i: %i\n",
+                        sta->addr, arvif->vdev_id, ret);
     } else if (old_state == IEEE80211_STA_ASSOC &&
                new_state == IEEE80211_STA_AUTH &&
                (vif->type == NL80211_IFTYPE_AP ||
@@ -3257,8 +3291,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
 
         ret = ath10k_station_disassoc(ar, arvif, sta);
         if (ret)
-            ath10k_warn("Failed to disassociate station: %pM\n",
-                        sta->addr);
+            ath10k_warn("Failed to disassociate station: %pM vdev %i ret %i\n",
+                        sta->addr, arvif->vdev_id, ret);
     }
 exit:
     mutex_unlock(&ar->conf_mutex);
@@ -3539,7 +3573,8 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
         }), ATH10K_FLUSH_TIMEOUT_HZ);
 
     if (ret <= 0 || skip)
-        ath10k_warn("tx not flushed\n");
+        ath10k_warn("tx not flushed (skip %i ar-state %i): %i\n",
+                    skip, ar->state, ret);
 
 skip:
     mutex_unlock(&ar->conf_mutex);
@@ -3905,7 +3940,8 @@ static bool ath10k_get_fixed_rate_nss(const struct cfg80211_bitrate_mask *mask,
 
 static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
                                        u8 fixed_rate,
-                                       u8 fixed_nss)
+                                       u8 fixed_nss,
+                                       u8 force_sgi)
 {
     struct ath10k *ar = arvif->ar;
     u32 vdev_param;
@@ -3914,12 +3950,16 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
     mutex_lock(&ar->conf_mutex);
 
     if (arvif->fixed_rate == fixed_rate &&
-        arvif->fixed_nss == fixed_nss)
+        arvif->fixed_nss == fixed_nss &&
+        arvif->force_sgi == force_sgi)
         goto exit;
 
     if (fixed_rate == WMI_FIXED_RATE_NONE)
         ath10k_dbg(ATH10K_DBG_MAC, "mac disable fixed bitrate mask\n");
 
+    if (force_sgi)
+        ath10k_dbg(ATH10K_DBG_MAC, "mac force sgi\n");
+
     vdev_param = ar->wmi.vdev_param->fixed_rate;
     ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
                                     vdev_param, fixed_rate);
@@ -3945,6 +3985,19 @@ static int ath10k_set_fixed_rate_param(struct ath10k_vif *arvif,
 
     arvif->fixed_nss = fixed_nss;
 
+    vdev_param = ar->wmi.vdev_param->sgi;
+    ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+                                    force_sgi);
+
+    if (ret) {
+        ath10k_warn("Could not set sgi param %d: %d\n",
+                    force_sgi, ret);
+        ret = -EINVAL;
+        goto exit;
+    }
+
+    arvif->force_sgi = force_sgi;
+
 exit:
     mutex_unlock(&ar->conf_mutex);
     return ret;
@@ -3959,6 +4012,11 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
     enum ieee80211_band band = ar->hw->conf.chandef.chan->band;
     u8 fixed_rate = WMI_FIXED_RATE_NONE;
     u8 fixed_nss = ar->num_rf_chains;
+    u8 force_sgi;
+
+    force_sgi = mask->control[band].gi;
+    if (force_sgi == NL80211_TXRATE_FORCE_LGI)
+        return -EINVAL;
 
     if (!ath10k_default_bitrate_mask(ar, band, mask)) {
         if (!ath10k_get_fixed_rate_nss(mask, band,
@@ -3967,7 +4025,13 @@ static int ath10k_set_bitrate_mask(struct ieee80211_hw *hw,
             return -EINVAL;
     }
 
-    return ath10k_set_fixed_rate_param(arvif, fixed_rate, fixed_nss);
+    if (fixed_rate == WMI_FIXED_RATE_NONE && force_sgi) {
+        ath10k_warn("Could not force SGI usage for default rate settings\n");
+        return -EINVAL;
+    }
+
+    return ath10k_set_fixed_rate_param(arvif, fixed_rate,
+                                       fixed_nss, force_sgi);
 }
 
 static void ath10k_channel_switch_beacon(struct ieee80211_hw *hw,
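For context, a sketch of how the forced-SGI value plumbed through the two hunks above is derived from the cfg80211 bitrate mask; the example_* name is hypothetical, the nl80211 constants and mask layout are real and appear in the diff:

/* Sketch: per-band GI field of the bitrate mask -> force_sgi flag.
 * NL80211_TXRATE_DEFAULT_GI (0) leaves rate control alone,
 * NL80211_TXRATE_FORCE_SGI forces short GI, and FORCE_LGI is
 * rejected because the firmware sgi vdev param cannot express
 * "force long GI". */
static int example_force_sgi(const struct cfg80211_bitrate_mask *mask,
                             enum ieee80211_band band, u8 *force_sgi)
{
    *force_sgi = mask->control[band].gi;

    if (*force_sgi == NL80211_TXRATE_FORCE_LGI)
        return -EINVAL;

    return 0;
}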
@@ -4060,6 +4124,16 @@ static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
     ieee80211_queue_work(hw, &arsta->update_wk);
 }
 
+static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+    /*
+     * FIXME: Return 0 for time being. Need to figure out whether FW
+     * has the API to fetch 64-bit local TSF
+     */
+
+    return 0;
+}
+
 static const struct ieee80211_ops ath10k_ops = {
     .tx = ath10k_tx,
     .start = ath10k_start,
@@ -4085,6 +4159,7 @@ static const struct ieee80211_ops ath10k_ops = {
     .set_bitrate_mask = ath10k_set_bitrate_mask,
     .channel_switch_beacon = ath10k_channel_switch_beacon,
     .sta_rc_update = ath10k_sta_rc_update,
+    .get_tsf = ath10k_get_tsf,
 #ifdef CONFIG_PM
     .suspend = ath10k_suspend,
     .resume = ath10k_resume,
@@ -4361,7 +4436,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
                                                ath10k_get_arvif_iter,
                                                &arvif_iter);
     if (!arvif_iter.arvif) {
-        ath10k_warn("No VIF found for VDEV: %d\n", vdev_id);
+        ath10k_warn("No VIF found for vdev %d\n", vdev_id);
         return NULL;
     }
 
@@ -4442,7 +4517,8 @@ int ath10k_mac_register(struct ath10k *ar)
             IEEE80211_HW_HAS_RATE_CONTROL |
             IEEE80211_HW_SUPPORTS_STATIC_SMPS |
             IEEE80211_HW_WANT_MONITOR_VIF |
-            IEEE80211_HW_AP_LINK_PS;
+            IEEE80211_HW_AP_LINK_PS |
+            IEEE80211_HW_SPECTRUM_MGMT;
 
     /* MSDU can have HTT TX fragment pushed in front. The additional 4
      * bytes is used for padding/alignment if necessary. */
@@ -4500,7 +4576,7 @@ int ath10k_mac_register(struct ath10k *ar)
     ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
                         ath10k_reg_notifier);
     if (ret) {
-        ath10k_err("Regulatory initialization failed\n");
+        ath10k_err("Regulatory initialization failed: %i\n", ret);
         goto err_free;
     }
 

diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
@@ -58,12 +58,10 @@ static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                        u32 *data);
 
-static void ath10k_pci_process_ce(struct ath10k *ar);
 static int ath10k_pci_post_rx(struct ath10k *ar);
 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                    int num);
 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
-static void ath10k_pci_stop_ce(struct ath10k *ar);
 static int ath10k_pci_cold_reset(struct ath10k *ar);
 static int ath10k_pci_warm_reset(struct ath10k *ar);
 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
@@ -74,7 +72,6 @@ static void ath10k_pci_free_irq(struct ath10k *ar);
 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                                struct ath10k_ce_pipe *rx_pipe,
                                struct bmi_xfer *xfer);
-static void ath10k_pci_cleanup_ce(struct ath10k *ar);
 
 static const struct ce_attr host_ce_config_wlan[] = {
     /* CE0: host->target HTC control and raw streams */
@@ -679,34 +676,12 @@ void ath10k_do_pci_sleep(struct ath10k *ar)
     }
 }
 
-/*
- * FIXME: Handle OOM properly.
- */
-static inline
-struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
-{
-    struct ath10k_pci_compl *compl = NULL;
-
-    spin_lock_bh(&pipe_info->pipe_lock);
-    if (list_empty(&pipe_info->compl_free)) {
-        ath10k_warn("Completion buffers are full\n");
-        goto exit;
-    }
-    compl = list_first_entry(&pipe_info->compl_free,
-                             struct ath10k_pci_compl, list);
-    list_del(&compl->list);
-exit:
-    spin_unlock_bh(&pipe_info->pipe_lock);
-    return compl;
-}
-
 /* Called by lower (CE) layer when a send to Target completes. */
 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
 {
     struct ath10k *ar = ce_state->ar;
     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-    struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
-    struct ath10k_pci_compl *compl;
+    struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
     void *transfer_context;
     u32 ce_data;
     unsigned int nbytes;
@@ -715,27 +690,12 @@ static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
     while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                          &ce_data, &nbytes,
                                          &transfer_id) == 0) {
-        compl = get_free_compl(pipe_info);
-        if (!compl)
-            break;
-
-        compl->state = ATH10K_PCI_COMPL_SEND;
-        compl->ce_state = ce_state;
-        compl->pipe_info = pipe_info;
-        compl->skb = transfer_context;
-        compl->nbytes = nbytes;
-        compl->transfer_id = transfer_id;
-        compl->flags = 0;
-
-        /*
-         * Add the completion to the processing queue.
-         */
-        spin_lock_bh(&ar_pci->compl_lock);
-        list_add_tail(&compl->list, &ar_pci->compl_process);
-        spin_unlock_bh(&ar_pci->compl_lock);
+        /* no need to call tx completion for NULL pointers */
+        if (transfer_context == NULL)
+            continue;
+
+        cb->tx_completion(ar, transfer_context, transfer_id);
     }
-
-    ath10k_pci_process_ce(ar);
 }
 
 /* Called by lower (CE) layer when data is received from the Target. */
@@ -744,77 +704,100 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
     struct ath10k *ar = ce_state->ar;
     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
     struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
-    struct ath10k_pci_compl *compl;
+    struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
     struct sk_buff *skb;
     void *transfer_context;
     u32 ce_data;
-    unsigned int nbytes;
+    unsigned int nbytes, max_nbytes;
     unsigned int transfer_id;
     unsigned int flags;
+    int err;
 
     while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                          &ce_data, &nbytes, &transfer_id,
                                          &flags) == 0) {
-        compl = get_free_compl(pipe_info);
-        if (!compl)
-            break;
-
-        compl->state = ATH10K_PCI_COMPL_RECV;
-        compl->ce_state = ce_state;
-        compl->pipe_info = pipe_info;
-        compl->skb = transfer_context;
-        compl->nbytes = nbytes;
-        compl->transfer_id = transfer_id;
-        compl->flags = flags;
+        err = ath10k_pci_post_rx_pipe(pipe_info, 1);
+        if (unlikely(err)) {
+            /* FIXME: retry */
+            ath10k_warn("failed to replenish CE rx ring %d: %d\n",
+                        pipe_info->pipe_num, err);
+        }
 
         skb = transfer_context;
+        max_nbytes = skb->len + skb_tailroom(skb);
         dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
-                         skb->len + skb_tailroom(skb),
-                         DMA_FROM_DEVICE);
-        /*
-         * Add the completion to the processing queue.
-         */
-        spin_lock_bh(&ar_pci->compl_lock);
-        list_add_tail(&compl->list, &ar_pci->compl_process);
-        spin_unlock_bh(&ar_pci->compl_lock);
-    }
+                         max_nbytes, DMA_FROM_DEVICE);
 
-    ath10k_pci_process_ce(ar);
+        if (unlikely(max_nbytes < nbytes)) {
+            ath10k_warn("rxed more than expected (nbytes %d, max %d)",
+                        nbytes, max_nbytes);
+            dev_kfree_skb_any(skb);
+            continue;
+        }
+
+        skb_put(skb, nbytes);
+        cb->rx_completion(ar, skb, pipe_info->pipe_num);
+    }
 }
 
-/* Send the first nbytes bytes of the buffer */
-static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
-                                    unsigned int transfer_id,
-                                    unsigned int bytes, struct sk_buff *nbuf)
+static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
                                 struct ath10k_hif_sg_item *items, int n_items)
 {
-    struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-    struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
-    struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
-    unsigned int len;
-    u32 flags = 0;
-    int ret;
+    struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+    struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+    struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+    unsigned int nentries_mask = src_ring->nentries_mask;
+    unsigned int sw_index = src_ring->sw_index;
+    unsigned int write_index = src_ring->write_index;
+    int err, i;
 
-    len = min(bytes, nbuf->len);
-    bytes -= len;
+    spin_lock_bh(&ar_pci->ce_lock);
 
-    if (len & 3)
-        ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
+    if (unlikely(CE_RING_DELTA(nentries_mask,
+                               write_index, sw_index - 1) < n_items)) {
+        err = -ENOBUFS;
+        goto unlock;
+    }
+
+    for (i = 0; i < n_items - 1; i++) {
+        ath10k_dbg(ATH10K_DBG_PCI,
+                   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+                   i, items[i].paddr, items[i].len, n_items);
+        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+                        items[i].vaddr, items[i].len);
+
+        err = ath10k_ce_send_nolock(ce_pipe,
+                                    items[i].transfer_context,
+                                    items[i].paddr,
+                                    items[i].len,
+                                    items[i].transfer_id,
+                                    CE_SEND_FLAG_GATHER);
+        if (err)
+            goto unlock;
+    }
+
+    /* `i` is equal to `n_items -1` after for() */
 
     ath10k_dbg(ATH10K_DBG_PCI,
-               "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
-               nbuf->data, (unsigned long long) skb_cb->paddr,
-               nbuf->len, len);
-    ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
-                    "ath10k tx: data: ",
-                    nbuf->data, nbuf->len);
+               "pci tx item %d paddr 0x%08x len %d n_items %d\n",
+               i, items[i].paddr, items[i].len, n_items);
+    ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
+                    items[i].vaddr, items[i].len);
 
-    ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
-                         flags);
-    if (ret)
-        ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);
+    err = ath10k_ce_send_nolock(ce_pipe,
                                 items[i].transfer_context,
                                 items[i].paddr,
                                 items[i].len,
                                 items[i].transfer_id,
                                 0);
+    if (err)
+        goto unlock;
 
-    return ret;
+    err = 0;
+unlock:
     spin_unlock_bh(&ar_pci->ce_lock);
+    return err;
 }
 
 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
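The new tx_sg handler depends on the copy engine gather convention: every descriptor except the last is posted with CE_SEND_FLAG_GATHER, and the final descriptor, posted with flags 0, commits the whole chain. A condensed sketch of just that loop, using the same calls as the hunk above (error unwinding elided; assumed to run under ce_lock):

static int example_ce_post_sg(struct ath10k_ce_pipe *ce_pipe,
                              struct ath10k_hif_sg_item *items,
                              int n_items)
{
    int i, err;

    for (i = 0; i < n_items; i++) {
        /* all but the last fragment are marked GATHER; the
         * unflagged last fragment triggers the actual transfer */
        err = ath10k_ce_send_nolock(ce_pipe,
                                    items[i].transfer_context,
                                    items[i].paddr,
                                    items[i].len,
                                    items[i].transfer_id,
                                    i < n_items - 1 ?
                                    CE_SEND_FLAG_GATHER : 0);
        if (err)
            return err;
    }

    return 0;
}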
@@ -903,52 +886,6 @@ static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
            sizeof(ar_pci->msg_callbacks_current));
 }
 
-static int ath10k_pci_alloc_compl(struct ath10k *ar)
-{
-    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-    const struct ce_attr *attr;
-    struct ath10k_pci_pipe *pipe_info;
-    struct ath10k_pci_compl *compl;
-    int i, pipe_num, completions;
-
-    spin_lock_init(&ar_pci->compl_lock);
-    INIT_LIST_HEAD(&ar_pci->compl_process);
-
-    for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-        pipe_info = &ar_pci->pipe_info[pipe_num];
-
-        spin_lock_init(&pipe_info->pipe_lock);
-        INIT_LIST_HEAD(&pipe_info->compl_free);
-
-        /* Handle Diagnostic CE specially */
-        if (pipe_info->ce_hdl == ar_pci->ce_diag)
-            continue;
-
-        attr = &host_ce_config_wlan[pipe_num];
-        completions = 0;
-
-        if (attr->src_nentries)
-            completions += attr->src_nentries;
-
-        if (attr->dest_nentries)
-            completions += attr->dest_nentries;
-
-        for (i = 0; i < completions; i++) {
-            compl = kmalloc(sizeof(*compl), GFP_KERNEL);
-            if (!compl) {
-                ath10k_warn("No memory for completion state\n");
-                ath10k_pci_cleanup_ce(ar);
-                return -ENOMEM;
-            }
-
-            compl->state = ATH10K_PCI_COMPL_FREE;
-            list_add_tail(&compl->list, &pipe_info->compl_free);
-        }
-    }
-
-    return 0;
-}
-
 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
 {
     struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -993,147 +930,6 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar)
         tasklet_kill(&ar_pci->pipe_info[i].intr);
 }
 
-static void ath10k_pci_stop_ce(struct ath10k *ar)
-{
-    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-    struct ath10k_pci_compl *compl;
-    struct sk_buff *skb;
-
-    /* Mark pending completions as aborted, so that upper layers free up
-     * their associated resources */
-    spin_lock_bh(&ar_pci->compl_lock);
-    list_for_each_entry(compl, &ar_pci->compl_process, list) {
-        skb = compl->skb;
-        ATH10K_SKB_CB(skb)->is_aborted = true;
-    }
-    spin_unlock_bh(&ar_pci->compl_lock);
-}
-
-static void ath10k_pci_cleanup_ce(struct ath10k *ar)
-{
-    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-    struct ath10k_pci_compl *compl, *tmp;
-    struct ath10k_pci_pipe *pipe_info;
-    struct sk_buff *netbuf;
-    int pipe_num;
-
-    /* Free pending completions. */
-    spin_lock_bh(&ar_pci->compl_lock);
-    if (!list_empty(&ar_pci->compl_process))
-        ath10k_warn("pending completions still present! possible memory leaks.\n");
-
-    list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
-        list_del(&compl->list);
-        netbuf = compl->skb;
-        dev_kfree_skb_any(netbuf);
-        kfree(compl);
-    }
-    spin_unlock_bh(&ar_pci->compl_lock);
-
-    /* Free unused completions for each pipe. */
-    for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-        pipe_info = &ar_pci->pipe_info[pipe_num];
-
-        spin_lock_bh(&pipe_info->pipe_lock);
-        list_for_each_entry_safe(compl, tmp,
-                                 &pipe_info->compl_free, list) {
-            list_del(&compl->list);
-            kfree(compl);
-        }
-        spin_unlock_bh(&pipe_info->pipe_lock);
-    }
-}
-
-static void ath10k_pci_process_ce(struct ath10k *ar)
-{
-    struct ath10k_pci *ar_pci = ar->hif.priv;
-    struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
-    struct ath10k_pci_compl *compl;
-    struct sk_buff *skb;
-    unsigned int nbytes;
-    int ret, send_done = 0;
-
-    /* Upper layers aren't ready to handle tx/rx completions in parallel so
-     * we must serialize all completion processing. */
-
-    spin_lock_bh(&ar_pci->compl_lock);
-    if (ar_pci->compl_processing) {
-        spin_unlock_bh(&ar_pci->compl_lock);
-        return;
-    }
-    ar_pci->compl_processing = true;
-    spin_unlock_bh(&ar_pci->compl_lock);
-
-    for (;;) {
-        spin_lock_bh(&ar_pci->compl_lock);
-        if (list_empty(&ar_pci->compl_process)) {
-            spin_unlock_bh(&ar_pci->compl_lock);
-            break;
-        }
-        compl = list_first_entry(&ar_pci->compl_process,
-                                 struct ath10k_pci_compl, list);
-        list_del(&compl->list);
-        spin_unlock_bh(&ar_pci->compl_lock);
-
-        switch (compl->state) {
-        case ATH10K_PCI_COMPL_SEND:
-            cb->tx_completion(ar,
-                              compl->skb,
-                              compl->transfer_id);
-            send_done = 1;
-            break;
-        case ATH10K_PCI_COMPL_RECV:
-            ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
-            if (ret) {
-                ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
-                            compl->pipe_info->pipe_num, ret);
-                break;
-            }
-
-            skb = compl->skb;
-            nbytes = compl->nbytes;
-
-            ath10k_dbg(ATH10K_DBG_PCI,
-                       "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
-                       skb, nbytes);
-            ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
-                            "ath10k rx: ", skb->data, nbytes);
-
-            if (skb->len + skb_tailroom(skb) >= nbytes) {
-                skb_trim(skb, 0);
-                skb_put(skb, nbytes);
-                cb->rx_completion(ar, skb,
-                                  compl->pipe_info->pipe_num);
-            } else {
-                ath10k_warn("rxed more than expected (nbytes %d, max %d)",
-                            nbytes,
-                            skb->len + skb_tailroom(skb));
-            }
-            break;
-        case ATH10K_PCI_COMPL_FREE:
-            ath10k_warn("free completion cannot be processed\n");
-            break;
-        default:
-            ath10k_warn("invalid completion state (%d)\n",
-                        compl->state);
-            break;
-        }
-
-        compl->state = ATH10K_PCI_COMPL_FREE;
-
-        /*
-         * Add completion back to the pipe's free list.
-         */
-        spin_lock_bh(&compl->pipe_info->pipe_lock);
-        list_add_tail(&compl->list, &compl->pipe_info->compl_free);
-        spin_unlock_bh(&compl->pipe_info->pipe_lock);
-    }
-
-    spin_lock_bh(&ar_pci->compl_lock);
-    ar_pci->compl_processing = false;
-    spin_unlock_bh(&ar_pci->compl_lock);
-}
-
 /* TODO - temporary mapping while we have too few CE's */
 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                               u16 service_id, u8 *ul_pipe,
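The three functions removed above were the heart of the deferred-completion scheme. With them gone, the control flow collapses from queue-then-replay to a direct call, roughly as follows (schematic in C comments, not literal driver code):

/* before: CE tasklet -> ce_send_done()/ce_recv_data()
 *           -> get_free_compl() + list_add_tail(&ar_pci->compl_process)
 *           -> ath10k_pci_process_ce() drains the list under compl_lock
 *           -> cb->tx_completion() / cb->rx_completion()
 *
 * after:  CE tasklet -> ce_send_done()/ce_recv_data()
 *           -> cb->tx_completion() / cb->rx_completion() directly
 *
 * The compl_lock serialisation is dropped on the assumption, implied
 * by this removal, that the upper layers can now take tx/rx
 * completions straight from tasklet context.
 */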
@@ -1305,17 +1101,11 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
     ath10k_pci_free_early_irq(ar);
     ath10k_pci_kill_tasklet(ar);
 
-    ret = ath10k_pci_alloc_compl(ar);
-    if (ret) {
-        ath10k_warn("failed to allocate CE completions: %d\n", ret);
-        goto err_early_irq;
-    }
-
     ret = ath10k_pci_request_irq(ar);
     if (ret) {
         ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                     ret);
-        goto err_free_compl;
+        goto err_early_irq;
     }
 
     ret = ath10k_pci_setup_ce_irq(ar);
@@ -1339,10 +1129,6 @@ static int ath10k_pci_hif_start(struct ath10k *ar)
     ath10k_ce_disable_interrupts(ar);
     ath10k_pci_free_irq(ar);
     ath10k_pci_kill_tasklet(ar);
-    ath10k_pci_stop_ce(ar);
-    ath10k_pci_process_ce(ar);
-err_free_compl:
-    ath10k_pci_cleanup_ce(ar);
 err_early_irq:
     /* Though there should be no interrupts (device was reset)
      * power_down() expects the early IRQ to be installed as per the
@@ -1413,18 +1199,10 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
 
     while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                       &ce_data, &nbytes, &id) == 0) {
-        /*
-         * Indicate the completion to higer layer to free
-         * the buffer
-         */
-
-        if (!netbuf) {
-            ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
-                        ce_hdl->id);
+        /* no need to call tx completion for NULL pointers */
+        if (!netbuf)
             continue;
-        }
 
-        ATH10K_SKB_CB(netbuf)->is_aborted = true;
         ar_pci->msg_callbacks_current.tx_completion(ar,
                                                     netbuf,
                                                     id);
@@ -1482,7 +1260,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
 
     ath10k_pci_free_irq(ar);
     ath10k_pci_kill_tasklet(ar);
-    ath10k_pci_stop_ce(ar);
 
     ret = ath10k_pci_request_early_irq(ar);
     if (ret)
@@ -1492,8 +1269,6 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
      * not DMA nor interrupt. We process the leftovers and then free
      * everything else up. */
 
-    ath10k_pci_process_ce(ar);
-    ath10k_pci_cleanup_ce(ar);
     ath10k_pci_buffer_cleanup(ar);
 
     /* Make the sure the device won't access any structures on the host by
@@ -2269,7 +2044,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
 #endif
 
 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
-    .send_head = ath10k_pci_hif_send_head,
+    .tx_sg = ath10k_pci_hif_tx_sg,
     .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
     .start = ath10k_pci_hif_start,
     .stop = ath10k_pci_hif_stop,

diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
@@ -43,23 +43,6 @@ struct bmi_xfer {
     u32 resp_len;
 };
 
-enum ath10k_pci_compl_state {
-    ATH10K_PCI_COMPL_FREE = 0,
-    ATH10K_PCI_COMPL_SEND,
-    ATH10K_PCI_COMPL_RECV,
-};
-
-struct ath10k_pci_compl {
-    struct list_head list;
-    enum ath10k_pci_compl_state state;
-    struct ath10k_ce_pipe *ce_state;
-    struct ath10k_pci_pipe *pipe_info;
-    struct sk_buff *skb;
-    unsigned int nbytes;
-    unsigned int transfer_id;
-    unsigned int flags;
-};
-
 /*
  * PCI-specific Target state
  *
@@ -175,9 +158,6 @@ struct ath10k_pci_pipe {
     /* protects compl_free and num_send_allowed */
     spinlock_t pipe_lock;
 
-    /* List of free CE completion slots */
-    struct list_head compl_free;
-
     struct ath10k_pci *ar_pci;
     struct tasklet_struct intr;
 };
@@ -205,14 +185,6 @@ struct ath10k_pci {
     atomic_t keep_awake_count;
     bool verified_awake;
 
-    /* List of CE completions to be processed */
-    struct list_head compl_process;
-
-    /* protects compl_processing and compl_process */
-    spinlock_t compl_lock;
-
-    bool compl_processing;
-
     struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
 
     struct ath10k_hif_cb msg_callbacks_current;

diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -51,7 +51,8 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
     struct ieee80211_tx_info *info;
     struct ath10k_skb_cb *skb_cb;
     struct sk_buff *msdu;
-    int ret;
 
     lockdep_assert_held(&htt->tx_lock);
 
+    ath10k_dbg(ATH10K_DBG_HTT, "htt tx completion msdu_id %u discard %d no_ack %d\n",
+               tx_done->msdu_id, !!tx_done->discard, !!tx_done->no_ack);
@@ -65,12 +66,12 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
     msdu = htt->pending_tx[tx_done->msdu_id];
     skb_cb = ATH10K_SKB_CB(msdu);
 
-    ret = ath10k_skb_unmap(dev, msdu);
-    if (ret)
-        ath10k_warn("data skb unmap failed (%d)\n", ret);
+    dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
 
-    if (skb_cb->htt.frag_len)
-        skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
+    if (skb_cb->htt.txbuf)
+        dma_pool_free(htt->tx_pool,
+                      skb_cb->htt.txbuf,
+                      skb_cb->htt.txbuf_paddr);
 
     ath10k_report_offchan_tx(htt->ar, msdu);
 
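The tx unref path above now returns the per-MSDU HTT descriptor to a DMA pool instead of skb_pull()ing an in-line fragment header. A sketch of the allocation side such a free implies (assumed shape; the matching alloc lives in the HTT tx start path, which is not part of this diff):

static int example_alloc_htt_txbuf(struct ath10k_htt *htt,
                                   struct ath10k_skb_cb *skb_cb)
{
    dma_addr_t paddr;

    /* coherent pool: one call yields both the CPU pointer and the
     * matching bus address */
    skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
                                       &paddr);
    if (!skb_cb->htt.txbuf)
        return -ENOMEM;

    /* both are kept in the skb cb so the completion path can
     * dma_pool_free() them, as in the hunk above */
    skb_cb->htt.txbuf_paddr = paddr;

    return 0;
}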
@@ -92,13 +93,11 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
     /* we do not own the msdu anymore */
 
-exit:
     spin_lock_bh(&htt->tx_lock);
     htt->pending_tx[tx_done->msdu_id] = NULL;
     ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
     __ath10k_htt_tx_dec_pending(htt);
     if (htt->num_pending_tx == 0)
         wake_up(&htt->empty_tx_wq);
     spin_unlock_bh(&htt->tx_lock);
 }
 
 static const u8 rx_legacy_rate_idx[] = {
@@ -258,6 +257,12 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
     status->band = ch->band;
     status->freq = ch->center_freq;
 
+    if (info->rate.info0 & HTT_RX_INDICATION_INFO0_END_VALID) {
+        /* TSF available only in 32-bit */
+        status->mactime = info->tsf & 0xffffffff;
+        status->flag |= RX_FLAG_MACTIME_END;
+    }
+
     ath10k_dbg(ATH10K_DBG_DATA,
                "rx skb %p len %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i\n",
                info->skb,
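The rx-status hunk above only ever reports the low 32 bits of the TSF; RX_FLAG_MACTIME_END tells mac80211 the timestamp refers to the end of the frame. The same pattern in isolation (a sketch; ieee80211_rx_status and the flags are mac80211 API, the function name is hypothetical):

static void example_stamp_mactime(struct ieee80211_rx_status *status,
                                  u64 tsf)
{
    /* the firmware exposes only a 32-bit timer sample, so just the
     * low word of the TSF is meaningful here */
    status->mactime = tsf & 0xffffffff;

    /* the sample is taken at the end of the frame, not the start
     * (that would be RX_FLAG_MACTIME_START) */
    status->flag |= RX_FLAG_MACTIME_END;
}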
@@ -378,7 +383,8 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
     spin_lock_bh(&ar->data_lock);
     peer = ath10k_peer_find_by_id(ar, ev->peer_id);
     if (!peer) {
-        ath10k_warn("unknown peer id %d\n", ev->peer_id);
+        ath10k_warn("peer-unmap-event: unknown peer id %d\n",
+                    ev->peer_id);
         goto exit;
     }
 

diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1360,7 +1360,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
     struct wmi_bcn_info *bcn_info;
     struct ath10k_vif *arvif;
     struct sk_buff *bcn;
-    int vdev_id = 0;
+    int ret, vdev_id = 0;
 
     ath10k_dbg(ATH10K_DBG_MGMT, "WMI_HOST_SWBA_EVENTID\n");
 
@@ -1435,16 +1435,27 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
             ath10k_warn("SWBA overrun on vdev %d\n",
                         arvif->vdev_id);
 
-            ath10k_skb_unmap(ar->dev, arvif->beacon);
+            dma_unmap_single(arvif->ar->dev,
+                             ATH10K_SKB_CB(arvif->beacon)->paddr,
+                             arvif->beacon->len, DMA_TO_DEVICE);
             dev_kfree_skb_any(arvif->beacon);
         }
 
-        ath10k_skb_map(ar->dev, bcn);
+        ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
+                                                   bcn->data, bcn->len,
+                                                   DMA_TO_DEVICE);
+        ret = dma_mapping_error(arvif->ar->dev,
+                                ATH10K_SKB_CB(bcn)->paddr);
+        if (ret) {
+            ath10k_warn("failed to map beacon: %d\n", ret);
+            goto skip;
+        }
 
         arvif->beacon = bcn;
         arvif->beacon_sent = false;
 
         ath10k_wmi_tx_beacon_nowait(arvif);
+skip:
         spin_unlock_bh(&ar->data_lock);
     }
 }
@@ -3382,7 +3393,6 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
         ci->max_power = ch->max_power;
         ci->reg_power = ch->max_reg_power;
         ci->antenna_max = ch->max_antenna_gain;
-        ci->antenna_max = 0;
 
         /* mode & flags share storage */
         ci->mode = ch->mode;