Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git

ath.git patches for 4.11. Major changes:

ath10k

* add debugfs file peer_debug_trigger for debugging firmware
Kalle Valo 2017-02-08 17:46:02 +02:00
commit 514612fc44
21 changed files with 259 additions and 91 deletions
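For context, the new per-station debugfs file works as a one-shot trigger: writing 1 to peer_debug_trigger sends WMI_PEER_DEBUG to the firmware for that peer, while reading it only returns a short usage hint. The sketch below is illustrative and not part of the patch; the debugfs path is an assumption (it depends on the wiphy name, the interface name and the station's MAC address).

        /* Hypothetical userspace helper: ask the ath10k firmware to dump peer
         * debug logs for one station. The path below is assumed and must be
         * adjusted to the local phy/interface/station; the driver accepts only
         * the value 1 (see ath10k_dbg_sta_write_peer_debug_trigger() below).
         */
        #include <stdio.h>

        int main(void)
        {
                const char *path =
                        "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/"
                        "stations/aa:bb:cc:dd:ee:ff/peer_debug_trigger";
                FILE *f = fopen(path, "w");

                if (!f) {
                        perror("fopen");
                        return 1;
                }

                fputs("1", f);
                fclose(f);
                return 0;
        }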

@@ -3,6 +3,7 @@ config ATH10K
         depends on MAC80211 && HAS_DMA
         select ATH_COMMON
         select CRC32
+        select WANT_DEV_COREDUMP
         ---help---
           This module adds support for wireless adapters based on
           Atheros IEEE 802.11ac family of chipsets.

@@ -33,6 +33,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
 
+#define QCA4019_SRAM_ADDR      0x000C0000
+#define QCA4019_SRAM_LEN       0x00040000 /* 256 kb */
+
 static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
 {
         return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
@@ -699,6 +702,25 @@ static int ath10k_ahb_hif_power_up(struct ath10k *ar)
         return ret;
 }
 
+static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+        u32 val = 0, region = addr & 0xfffff;
+
+        val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+
+        if (region >= QCA4019_SRAM_ADDR && region <=
+            (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
+                /* SRAM contents for QCA4019 can be directly accessed and
+                 * no conversions are required
+                 */
+                val |= region;
+        } else {
+                val |= 0x100000 | region;
+        }
+
+        return val;
+}
+
 static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
         .tx_sg = ath10k_pci_hif_tx_sg,
         .diag_read = ath10k_pci_hif_diag_read,
@@ -766,6 +788,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
         ar_pci->mem_len = ar_ahb->mem_len;
         ar_pci->ar = ar;
         ar_pci->bus_ops = &ath10k_ahb_bus_ops;
+        ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
 
         ret = ath10k_pci_setup_resource(ar);
         if (ret) {

@@ -959,9 +959,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
          */
         dest_ring->base_addr_owner_space_unaligned =
                 dma_zalloc_coherent(ar->dev,
                                     (nentries * sizeof(struct ce_desc) +
                                      CE_DESC_RING_ALIGN),
                                     &base_addr, GFP_KERNEL);
         if (!dest_ring->base_addr_owner_space_unaligned) {
                 kfree(dest_ring);
                 return ERR_PTR(-ENOMEM);

@@ -1996,7 +1996,8 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
         ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
                    ar->hw->wiphy->fw_version);
 
-        if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+        if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) &&
+            mode == ATH10K_FIRMWARE_MODE_NORMAL) {
                 val = 0;
                 if (ath10k_peer_stats_enabled(ar))
                         val = WMI_10_4_PEER_STATS;
@@ -2049,10 +2050,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
          * possible to implicitly make it correct by creating a dummy vdev and
          * then deleting it.
          */
-        status = ath10k_core_reset_rx_filter(ar);
-        if (status) {
-                ath10k_err(ar, "failed to reset rx filter: %d\n", status);
-                goto err_hif_stop;
+        if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+                status = ath10k_core_reset_rx_filter(ar);
+                if (status) {
+                        ath10k_err(ar,
+                                   "failed to reset rx filter: %d\n", status);
+                        goto err_hif_stop;
+                }
         }
 
         /* If firmware indicates Full Rx Reorder support it must be used in a

@@ -306,6 +306,69 @@ static const struct file_operations fops_delba = {
         .llseek = default_llseek,
 };
 
+static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file,
+                                                       char __user *user_buf,
+                                                       size_t count,
+                                                       loff_t *ppos)
+{
+        struct ieee80211_sta *sta = file->private_data;
+        struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+        struct ath10k *ar = arsta->arvif->ar;
+        char buf[8];
+        int len = 0;
+
+        mutex_lock(&ar->conf_mutex);
+        len = scnprintf(buf, sizeof(buf) - len,
+                        "Write 1 to once trigger the debug logs\n");
+        mutex_unlock(&ar->conf_mutex);
+
+        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
+                                        const char __user *user_buf,
+                                        size_t count, loff_t *ppos)
+{
+        struct ieee80211_sta *sta = file->private_data;
+        struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+        struct ath10k *ar = arsta->arvif->ar;
+        u8 peer_debug_trigger;
+        int ret;
+
+        if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger))
+                return -EINVAL;
+
+        if (peer_debug_trigger != 1)
+                return -EINVAL;
+
+        mutex_lock(&ar->conf_mutex);
+        if (ar->state != ATH10K_STATE_ON) {
+                ret = -ENETDOWN;
+                goto out;
+        }
+
+        ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
+                                        WMI_PEER_DEBUG, peer_debug_trigger);
+        if (ret) {
+                ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
+                            ret);
+                goto out;
+        }
+out:
+        mutex_unlock(&ar->conf_mutex);
+        return count;
+}
+
+static const struct file_operations fops_peer_debug_trigger = {
+        .open = simple_open,
+        .read = ath10k_dbg_sta_read_peer_debug_trigger,
+        .write = ath10k_dbg_sta_write_peer_debug_trigger,
+        .owner = THIS_MODULE,
+        .llseek = default_llseek,
+};
+
 void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                             struct ieee80211_sta *sta, struct dentry *dir)
 {
@@ -314,4 +377,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
         debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
         debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
         debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
+        debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
+                            &fops_peer_debug_trigger);
 }

@@ -1636,7 +1636,7 @@ struct ath10k_htt {
                 int size;
 
                 /* size - 1 */
-                unsigned size_mask;
+                unsigned int size_mask;
 
                 /* how many rx buffers to keep in the ring */
                 int fill_level;
@@ -1657,7 +1657,7 @@ struct ath10k_htt {
 
                 /* where HTT SW has processed bufs filled by rx MAC DMA */
                 struct {
-                        unsigned msdu_payld;
+                        unsigned int msdu_payld;
                 } sw_rd_idx;
 
                 /*
@@ -1820,7 +1820,7 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
-int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
 int ath10k_htt_tx(struct ath10k_htt *htt,
                   enum ath10k_hw_txrx_mode txmode,
                   struct sk_buff *msdu);

@@ -2492,7 +2492,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
                 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
                                 skb->data, skb->len);
                 break;
-        };
+        }
         return true;
 }
 EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);

@@ -840,29 +840,33 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr)
         ath10k_pci_rx_post(ar);
 }
 
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+        u32 val = 0, region = addr & 0xfffff;
+
+        val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+                                 & 0x7ff) << 21;
+        val |= 0x100000 | region;
+        return val;
+}
+
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+        u32 val = 0, region = addr & 0xfffff;
+
+        val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+        val |= 0x100000 | region;
+        return val;
+}
+
 static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
 {
-        u32 val = 0;
+        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 
-        switch (ar->hw_rev) {
-        case ATH10K_HW_QCA988X:
-        case ATH10K_HW_QCA9887:
-        case ATH10K_HW_QCA6174:
-        case ATH10K_HW_QCA9377:
-                val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
-                                         CORE_CTRL_ADDRESS) &
-                       0x7ff) << 21;
-                break;
-        case ATH10K_HW_QCA9888:
-        case ATH10K_HW_QCA99X0:
-        case ATH10K_HW_QCA9984:
-        case ATH10K_HW_QCA4019:
-                val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
-                break;
-        }
+        if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+                return -ENOTSUPP;
 
-        val |= 0x100000 | (addr & 0xfffff);
-        return val;
+        return ar_pci->targ_cpu_to_ce_addr(ar, addr);
 }
 
 /*
@@ -1590,7 +1594,7 @@
                 /* TODO: Find appropriate register configuration for QCA99X0
                  * to mask irq/MSI.
                  */
                 break;
         }
 }
 
@@ -3170,6 +3174,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
         bool pci_ps;
         int (*pci_soft_reset)(struct ath10k *ar);
         int (*pci_hard_reset)(struct ath10k *ar);
+        u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
 
         switch (pci_dev->device) {
         case QCA988X_2_0_DEVICE_ID:
@@ -3177,12 +3182,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                 pci_ps = false;
                 pci_soft_reset = ath10k_pci_warm_reset;
                 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+                targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
                 break;
         case QCA9887_1_0_DEVICE_ID:
                 hw_rev = ATH10K_HW_QCA9887;
                 pci_ps = false;
                 pci_soft_reset = ath10k_pci_warm_reset;
                 pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+                targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
                 break;
         case QCA6164_2_1_DEVICE_ID:
         case QCA6174_2_1_DEVICE_ID:
@@ -3190,30 +3197,35 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
                 pci_ps = true;
                 pci_soft_reset = ath10k_pci_warm_reset;
                 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+                targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
                 break;
         case QCA99X0_2_0_DEVICE_ID:
                 hw_rev = ATH10K_HW_QCA99X0;
                 pci_ps = false;
                 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+                targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
                 break;
         case QCA9984_1_0_DEVICE_ID:
                 hw_rev = ATH10K_HW_QCA9984;
                 pci_ps = false;
                 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+                targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
                 break;
         case QCA9888_2_0_DEVICE_ID:
                 hw_rev = ATH10K_HW_QCA9888;
                 pci_ps = false;
                 pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
                 pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+                targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
                 break;
         case QCA9377_1_0_DEVICE_ID:
                 hw_rev = ATH10K_HW_QCA9377;
                 pci_ps = true;
                 pci_soft_reset = NULL;
                 pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+                targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
                 break;
         default:
                 WARN_ON(1);
@@ -3240,6 +3252,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
         ar_pci->bus_ops = &ath10k_pci_bus_ops;
         ar_pci->pci_soft_reset = pci_soft_reset;
         ar_pci->pci_hard_reset = pci_hard_reset;
+        ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
 
         ar->id.vendor = pdev->vendor;
         ar->id.device = pdev->device;

@@ -233,6 +233,11 @@ struct ath10k_pci {
         /* Chip specific pci full reset function */
         int (*pci_hard_reset)(struct ath10k *ar);
 
+        /* chip specific methods for converting target CPU virtual address
+         * space to CE address space
+         */
+        u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
         /* Keep this entry in the last, memory for struct ath10k_ahb is
          * allocated (ahb support enabled case) in the continuation of
          * this struct.

@@ -5811,6 +5811,7 @@ enum wmi_peer_param {
         WMI_PEER_CHAN_WIDTH = 0x4,
         WMI_PEER_NSS = 0x5,
         WMI_PEER_USE_4ADDR = 0x6,
+        WMI_PEER_DEBUG = 0xa,
         WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
 };
 
@@ -6604,7 +6605,7 @@ struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
 int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
 int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
                                u32 cmd_id);
-void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *arg);
 void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
                                      struct ath10k_fw_stats_pdev *dst);

@@ -108,7 +108,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
 #define ATH_AGGR_MIN_QDEPTH 2
 /* minimum h/w qdepth for non-aggregated traffic */
 #define ATH_NON_AGGR_MIN_QDEPTH 8
-#define ATH_TX_COMPLETE_POLL_INT 1000
+#define ATH_HW_CHECK_POLL_INT 1000
 #define ATH_TXFIFO_DEPTH 8
 #define ATH_TX_ERROR 0x01
 
@@ -745,7 +745,7 @@ void ath9k_csa_update(struct ath_softc *sc);
 #define ATH_PAPRD_TIMEOUT 100 /* msecs */
 #define ATH_PLL_WORK_INTERVAL 100
 
-void ath_tx_complete_poll_work(struct work_struct *work);
+void ath_hw_check_work(struct work_struct *work);
 void ath_reset_work(struct work_struct *work);
 bool ath_hw_check(struct ath_softc *sc);
 void ath_hw_pll_work(struct work_struct *work);
@@ -998,6 +998,7 @@ struct ath_softc {
         struct survey_info *cur_survey;
         struct survey_info survey[ATH9K_NUM_CHANNELS];
 
+        spinlock_t intr_lock;
         struct tasklet_struct intr_tq;
         struct tasklet_struct bcon_tasklet;
         struct ath_hw *sc_ah;
@@ -1053,7 +1054,7 @@ struct ath_softc {
 #ifdef CONFIG_ATH9K_DEBUGFS
         struct ath9k_debug debug;
 #endif
-        struct delayed_work tx_complete_work;
+        struct delayed_work hw_check_work;
         struct delayed_work hw_pll_work;
         struct timer_list sleep_timer;

@@ -1603,6 +1603,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
         int count = 50;
         u32 reg, last_val;
 
+        /* Check if chip failed to wake up */
+        if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+                return false;
+
         if (AR_SREV_9300(ah))
                 return !ath9k_hw_detect_mac_hang(ah);

@@ -669,6 +669,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
                 common->bt_ant_diversity = 1;
 
         spin_lock_init(&common->cc_lock);
+        spin_lock_init(&sc->intr_lock);
         spin_lock_init(&sc->sc_serial_rw);
         spin_lock_init(&sc->sc_pm_lock);
         spin_lock_init(&sc->chan_lock);
@@ -681,6 +682,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
         INIT_WORK(&sc->hw_reset_work, ath_reset_work);
         INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
         INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+        INIT_DELAYED_WORK(&sc->hw_check_work, ath_hw_check_work);
 
         ath9k_init_channel_context(sc);

@@ -20,20 +20,13 @@
  * TX polling - checks if the TX engine is stuck somewhere
  * and issues a chip reset if so.
  */
-void ath_tx_complete_poll_work(struct work_struct *work)
+static bool ath_tx_complete_check(struct ath_softc *sc)
 {
-        struct ath_softc *sc = container_of(work, struct ath_softc,
-                                            tx_complete_work.work);
         struct ath_txq *txq;
         int i;
-        bool needreset = false;
 
-        if (sc->tx99_state) {
-                ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
-                        "skip tx hung detection on tx99\n");
-                return;
-        }
+        if (sc->tx99_state)
+                return true;
 
         for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                 txq = sc->tx.txq_map[i];
@@ -41,25 +34,36 @@ void ath_tx_complete_poll_work(struct work_struct *work)
                 ath_txq_lock(sc, txq);
                 if (txq->axq_depth) {
                         if (txq->axq_tx_inprogress) {
-                                needreset = true;
                                 ath_txq_unlock(sc, txq);
-                                break;
-                        } else {
-                                txq->axq_tx_inprogress = true;
+                                goto reset;
                         }
+
+                        txq->axq_tx_inprogress = true;
                 }
                 ath_txq_unlock(sc, txq);
         }
 
-        if (needreset) {
-                ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
-                        "tx hung, resetting the chip\n");
-                ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
-                return;
-        }
+        return true;
 
-        ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
-                                     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+reset:
+        ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+                "tx hung, resetting the chip\n");
+        ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
+        return false;
+}
+
+void ath_hw_check_work(struct work_struct *work)
+{
+        struct ath_softc *sc = container_of(work, struct ath_softc,
+                                            hw_check_work.work);
+
+        if (!ath_hw_check(sc) ||
+            !ath_tx_complete_check(sc))
+                return;
+
+        ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+                                     msecs_to_jiffies(ATH_HW_CHECK_POLL_INT));
 }
 
 /*

@@ -805,21 +805,12 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
 }
 EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
 
-void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
 {
         struct ath_common *common = ath9k_hw_common(ah);
         u32 sync_default = AR_INTR_SYNC_DEFAULT;
         u32 async_mask;
 
-        if (!(ah->imask & ATH9K_INT_GLOBAL))
-                return;
-
-        if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
-                ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
-                        atomic_read(&ah->intr_ref_cnt));
-                return;
-        }
-
         if (AR_SREV_9340(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
             AR_SREV_9561(ah))
                 sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
@@ -841,6 +832,39 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
         ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
                 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
 }
+
+void ath9k_hw_resume_interrupts(struct ath_hw *ah)
+{
+        struct ath_common *common = ath9k_hw_common(ah);
+
+        if (!(ah->imask & ATH9K_INT_GLOBAL))
+                return;
+
+        if (atomic_read(&ah->intr_ref_cnt) != 0) {
+                ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+                        atomic_read(&ah->intr_ref_cnt));
+                return;
+        }
+
+        __ath9k_hw_enable_interrupts(ah);
+}
+EXPORT_SYMBOL(ath9k_hw_resume_interrupts);
+
+void ath9k_hw_enable_interrupts(struct ath_hw *ah)
+{
+        struct ath_common *common = ath9k_hw_common(ah);
+
+        if (!(ah->imask & ATH9K_INT_GLOBAL))
+                return;
+
+        if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
+                ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
+                        atomic_read(&ah->intr_ref_cnt));
+                return;
+        }
+
+        __ath9k_hw_enable_interrupts(ah);
+}
 EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
 
 void ath9k_hw_set_interrupts(struct ath_hw *ah)

@@ -744,6 +744,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah);
 void ath9k_hw_enable_interrupts(struct ath_hw *ah);
 void ath9k_hw_disable_interrupts(struct ath_hw *ah);
 void ath9k_hw_kill_interrupts(struct ath_hw *ah);
+void ath9k_hw_resume_interrupts(struct ath_hw *ah);
 
 void ar9002_hw_attach_mac_ops(struct ath_hw *ah);

@@ -181,7 +181,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
 static void __ath_cancel_work(struct ath_softc *sc)
 {
         cancel_work_sync(&sc->paprd_work);
-        cancel_delayed_work_sync(&sc->tx_complete_work);
+        cancel_delayed_work_sync(&sc->hw_check_work);
         cancel_delayed_work_sync(&sc->hw_pll_work);
 
 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -198,7 +198,8 @@ void ath_cancel_work(struct ath_softc *sc)
 
 void ath_restart_work(struct ath_softc *sc)
 {
-        ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+        ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
+                                     ATH_HW_CHECK_POLL_INT);
 
         if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
                 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
@@ -373,21 +374,20 @@
         struct ath_common *common = ath9k_hw_common(ah);
         enum ath_reset_type type;
         unsigned long flags;
-        u32 status = sc->intrstatus;
+        u32 status;
         u32 rxmask;
 
+        spin_lock_irqsave(&sc->intr_lock, flags);
+        status = sc->intrstatus;
+        sc->intrstatus = 0;
+        spin_unlock_irqrestore(&sc->intr_lock, flags);
+
         ath9k_ps_wakeup(sc);
         spin_lock(&sc->sc_pcu_lock);
 
         if (status & ATH9K_INT_FATAL) {
                 type = RESET_TYPE_FATAL_INT;
                 ath9k_queue_reset(sc, type);
-
-                /*
-                 * Increment the ref. counter here so that
-                 * interrupts are enabled in the reset routine.
-                 */
-                atomic_inc(&ah->intr_ref_cnt);
                 ath_dbg(common, RESET, "FATAL: Skipping interrupts\n");
                 goto out;
         }
@@ -403,11 +403,6 @@
                 type = RESET_TYPE_BB_WATCHDOG;
                 ath9k_queue_reset(sc, type);
 
-                /*
-                 * Increment the ref. counter here so that
-                 * interrupts are enabled in the reset routine.
-                 */
-                atomic_inc(&ah->intr_ref_cnt);
                 ath_dbg(common, RESET,
                         "BB_WATCHDOG: Skipping interrupts\n");
                 goto out;
@@ -420,7 +415,6 @@
                 if ((sc->gtt_cnt >= MAX_GTT_CNT) && !ath9k_hw_check_alive(ah)) {
                         type = RESET_TYPE_TX_GTT;
                         ath9k_queue_reset(sc, type);
-                        atomic_inc(&ah->intr_ref_cnt);
                         ath_dbg(common, RESET,
                                 "GTT: Skipping interrupts\n");
                         goto out;
@@ -477,7 +471,7 @@
         ath9k_btcoex_handle_interrupt(sc, status);
 
         /* re-enable hardware interrupt */
-        ath9k_hw_enable_interrupts(ah);
+        ath9k_hw_resume_interrupts(ah);
 out:
         spin_unlock(&sc->sc_pcu_lock);
         ath9k_ps_restore(sc);
@@ -541,7 +535,9 @@ irqreturn_t ath_isr(int irq, void *dev)
                 return IRQ_NONE;
 
         /* Cache the status */
-        sc->intrstatus = status;
+        spin_lock(&sc->intr_lock);
+        sc->intrstatus |= status;
+        spin_unlock(&sc->intr_lock);
 
         if (status & SCHED_INTR)
                 sched = true;
@@ -587,7 +583,7 @@
 
         if (sched) {
                 /* turn off every interrupt */
-                ath9k_hw_disable_interrupts(ah);
+                ath9k_hw_kill_interrupts(ah);
                 tasklet_schedule(&sc->intr_tq);
         }
 
@@ -2091,7 +2087,7 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
         int timeout;
         bool drain_txq;
 
-        cancel_delayed_work_sync(&sc->tx_complete_work);
+        cancel_delayed_work_sync(&sc->hw_check_work);
 
         if (ah->ah_flags & AH_UNPLUGGED) {
                 ath_dbg(common, ANY, "Device has been unplugged!\n");
@@ -2129,7 +2125,8 @@ void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
                 ath9k_ps_restore(sc);
         }
 
-        ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
+        ieee80211_queue_delayed_work(hw, &sc->hw_check_work,
+                                     ATH_HW_CHECK_POLL_INT);
 }
 
 static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)

@@ -2872,8 +2872,6 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
                 return error;
         }
 
-        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
-
         if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
                 error = ath_tx_edma_init(sc);

@@ -574,6 +574,7 @@ static void wcn36xx_hw_scan_worker(struct work_struct *work)
         struct cfg80211_scan_request *req = wcn->scan_req;
         u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
         struct cfg80211_scan_info scan_info = {};
+        bool aborted = false;
         int i;
 
         wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels);
@@ -585,6 +586,13 @@ static void wcn36xx_hw_scan_worker(struct work_struct *work)
         wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
 
         for (i = 0; i < req->n_channels; i++) {
+                mutex_lock(&wcn->scan_lock);
+                aborted = wcn->scan_aborted;
+                mutex_unlock(&wcn->scan_lock);
+
+                if (aborted)
+                        break;
+
                 wcn->scan_freq = req->channels[i]->center_freq;
                 wcn->scan_band = req->channels[i]->band;
 
@@ -596,7 +604,7 @@
         }
 
         wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
-        scan_info.aborted = false;
+        scan_info.aborted = aborted;
         ieee80211_scan_completed(wcn->hw, &scan_info);
 
         mutex_lock(&wcn->scan_lock);
@@ -615,6 +623,8 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
                 mutex_unlock(&wcn->scan_lock);
                 return -EBUSY;
         }
+
+        wcn->scan_aborted = false;
         wcn->scan_req = &hw_req->req;
         mutex_unlock(&wcn->scan_lock);
 
@@ -623,6 +633,18 @@
         return 0;
 }
 
+static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
+                                   struct ieee80211_vif *vif)
+{
+        struct wcn36xx *wcn = hw->priv;
+
+        mutex_lock(&wcn->scan_lock);
+        wcn->scan_aborted = true;
+        mutex_unlock(&wcn->scan_lock);
+
+        cancel_work_sync(&wcn->scan_work);
+}
+
 static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
                                          enum nl80211_band band)
 {
@@ -1034,6 +1056,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
         .tx = wcn36xx_tx,
         .set_key = wcn36xx_set_key,
         .hw_scan = wcn36xx_hw_scan,
+        .cancel_hw_scan = wcn36xx_cancel_hw_scan,
         .bss_info_changed = wcn36xx_bss_info_changed,
         .set_rts_threshold = wcn36xx_set_rts_threshold,
         .sta_add = wcn36xx_sta_add,

@@ -220,6 +220,7 @@ struct wcn36xx {
         int scan_freq;
         int scan_band;
         struct mutex scan_lock;
+        bool scan_aborted;
 
         /* DXE channels */
         struct wcn36xx_dxe_ch dxe_tx_l_ch; /* TX low */

@@ -15,6 +15,7 @@
  */
 
 #include <linux/etherdevice.h>
+#include <linux/moduleparam.h>
 #include "wil6210.h"
 #include "wmi.h"