Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next

John W. Linville says:

====================
Please accept this batch of updates intended for the 3.12 stream.

For the mac80211 bits, Johannes says this:

"This time I have various improvements all over the place: IBSS, mesh,
testmode, AP client powersave handling, one of the rare rfkill patches
and some code cleanup."

Also for mac80211:

"And I also have some more changes for -next, just a few small fixes and
improvements, nothing really stands out."

And for iwlwifi:

"This time I have some powersave work (notably uAPSD support), CQM
offloads, support for a new firmware API and various code cleanups."

Regarding the Bluetooth bits, Gustavo says:

"Patches to 3.12, here we have:

* implementation of a proper tty_port for RFCOMM devices, this fixes some
issues people were seeing lately in the kernel.
* Add voice_setting option for SCO, it is used for SCO Codec selection
* bugfixes, small improvements and clean ups"

For the NFC bits, Samuel says:

"With this one we have:

- A few pn533 improvements and minor fixes. Testing our pn533 driver
  against Google's NCI stack triggered a few issues that we have now
  fixed. We also added Tx fragmentation support to this driver.

- More NFC secure element handling. We added a GET_SE netlink command
  for getting all the discovered secure elements, and we defined two
  additional secure element netlink events (transaction and connectivity).
  We also fixed a couple of typos and copy-paste bugs in the secure
  element handling code.

- Firmware download support for the pn544 driver. This chipset can enter a
  special mode where it waits for firmware blobs to replace the one
  already flashed. We now support that mode."

With respect to the ath tree, Kalle says:

"New features in ath10k are rx/tx checsumming in hw and survey scan
implemented by Michal. Also he made fixes to different areas of the
driver, most notable being fixing the case when using two streams and
reducing the number of interface combinations to avoid firmware crashes.
Bartosz did a clean related to how we handle SoC power save in PCI
layer.

For ath6kl, Mohammed and Vasanth each sent a patch to fix two infrequent
crashes."

I also pulled the wireless tree into wireless-next to support a
request from Johannes.  On top of all that, there are the usual
sort of driver updates.  The mwifiex, brcmfmac, brcmsmac, ath9k,
and rt2x00 drivers all get some attention, as does the bcma bus and
a few other random bits here and there.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

Committed by David S. Miller on 2013-09-03 21:45:31 -04:00
Commit e7abfe4092: 241 changed files with 5457 additions and 2943 deletions


@ -325,6 +325,7 @@
<title>functions/definitions</title>
!Finclude/net/mac80211.h ieee80211_rx_status
!Finclude/net/mac80211.h mac80211_rx_flags
!Finclude/net/mac80211.h mac80211_tx_info_flags
!Finclude/net/mac80211.h mac80211_tx_control_flags
!Finclude/net/mac80211.h mac80211_rate_control_flags
!Finclude/net/mac80211.h ieee80211_tx_rate


@ -5792,7 +5792,7 @@ M: Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
M: Samuel Ortiz <sameo@linux.intel.com>
L: linux-wireless@vger.kernel.org
L: linux-nfc@lists.01.org (moderated for non-subscribers)
S: Maintained
S: Supported
F: net/nfc/
F: include/net/nfc/
F: include/uapi/linux/nfc.h


@ -31,7 +31,7 @@ static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
u32 v;
int i;
@ -55,7 +55,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
}
}
static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
{
int max_retries = 10;
u16 ret = 0;
@ -98,7 +98,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
return ret;
}
static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
u8 address, u16 data)
{
int max_retries = 10;
@ -137,6 +137,13 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}
static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
u8 address, u16 data)
{
bcma_pcie_mdio_write(pc, device, address, data);
return bcma_pcie_mdio_read(pc, device, address);
}
/**************************************************
* Workarounds.
**************************************************/
@ -203,6 +210,25 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
}
}
static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
{
u16 data;
if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
data = up ? 0x74 : 0x7C;
bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
data = up ? 0x75 : 0x7D;
bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
}
}
/**************************************************
* Init.
**************************************************/
@ -262,7 +288,7 @@ int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
u32 w;
@ -274,4 +300,33 @@ void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
}
EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);
void bcma_core_pci_up(struct bcma_bus *bus)
{
struct bcma_drv_pci *pc;
if (bus->hosttype != BCMA_HOSTTYPE_PCI)
return;
pc = &bus->drv_pci[0];
bcma_core_pci_power_save(pc, true);
bcma_core_pci_extend_L1timer(pc, true);
}
EXPORT_SYMBOL_GPL(bcma_core_pci_up);
void bcma_core_pci_down(struct bcma_bus *bus)
{
struct bcma_drv_pci *pc;
if (bus->hosttype != BCMA_HOSTTYPE_PCI)
return;
pc = &bus->drv_pci[0];
bcma_core_pci_extend_L1timer(pc, false);
bcma_core_pci_power_save(pc, false);
}
EXPORT_SYMBOL_GPL(bcma_core_pci_down);
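
As a usage note, a PCI-hosted bcma client driver is expected to bracket
hardware activation with this new up/down pair. A minimal sketch with
hypothetical driver names (the example_* identifiers are illustrative,
not part of this diff):

/* Hypothetical bcma client driver; example_* names are illustrative. */
static int example_wlan_start(struct example_dev *dev)
{
	/* enable the PCIe power-save workaround, then extend the L1 timer */
	bcma_core_pci_up(dev->core->bus);
	return example_hw_init(dev);
}

static void example_wlan_stop(struct example_dev *dev)
{
	example_hw_deinit(dev);
	/* mirror image of _up: shrink the L1 timer, then drop power save */
	bcma_core_pci_down(dev->core->bus);
}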


@ -581,6 +581,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
struct bcma_drv_pci_host *pc_host;
int readrq;
if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
/* This is not a device on the PCI-core bridge. */
@ -595,6 +596,11 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
dev->irq = bcma_core_irq(pc_host->pdev->core);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
readrq = pcie_get_readrq(dev);
if (readrq > 128) {
pr_info("change PCIe max read request size from %i to 128\n", readrq);
pcie_set_readrq(dev, 128);
}
return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);


@ -43,7 +43,7 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
@ -89,7 +89,7 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
@ -135,7 +135,7 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
ret = strict_strtol(buf, 10, &result);
ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
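
strict_strtol() is deprecated in favor of the kstrto*() family; the
conversion is a drop-in replacement here, since both return 0 on success
and a negative errno on failure. The call shape, for reference:

long result;
int ret;

/* base 10; malformed or out-of-range input fails with -EINVAL or
 * -ERANGE instead of being silently misparsed */
ret = kstrtol(buf, 10, &result);
if (ret)
	return ret;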


@ -486,7 +486,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
if (firmwarelen - offset < txlen)
txlen = firmwarelen - offset;
tx_blocks = (txlen + blksz_dl - 1) / blksz_dl;
tx_blocks = DIV_ROUND_UP(txlen, blksz_dl);
memcpy(fwbuf, &firmware[offset], txlen);
}
@ -873,7 +873,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
}
blksz = SDIO_BLOCK_SIZE;
buf_block_len = (nb + blksz - 1) / blksz;
buf_block_len = DIV_ROUND_UP(nb, blksz);
sdio_claim_host(card->func);
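
DIV_ROUND_UP() from <linux/kernel.h> expands to exactly the arithmetic it
replaces, so this is a readability-only change:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* e.g. txlen = 600 bytes, blksz_dl = 256:
 *   (600 + 256 - 1) / 256 = 855 / 256 = 3 blocks,
 * identical to the removed (txlen + blksz_dl - 1) / blksz_dl */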


@ -637,6 +637,7 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
ath10k_pci_wake(ar);
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->hw_index &= nentries_mask;
ath10k_pci_sleep(ar);
}
read_index = src_ring->hw_index;
@ -950,10 +951,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_pci_wake(ar);
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index;
src_ring->write_index =
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
ath10k_pci_sleep(ar);
src_ring->per_transfer_context = (void **)ptr;
@ -1035,8 +1038,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_pci_wake(ar);
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
ath10k_pci_sleep(ar);
dest_ring->per_transfer_context = (void **)ptr;


@ -38,6 +38,7 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
#define ATH10K_NUM_CHANS 38
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
@ -285,6 +286,7 @@ struct ath10k {
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
u32 num_rf_chains;
struct targetdef *targetdef;
struct hostdef *hostdef;
@ -374,6 +376,12 @@ struct ath10k {
struct work_struct restart_work;
/* cycle count is reported twice for each visited channel during scan.
* access protected by data_lock */
u32 survey_last_rx_clear_count;
u32 survey_last_cycle_count;
struct survey_info survey[ATH10K_NUM_CHANS];
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif


@ -804,6 +804,37 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
return false;
}
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
u32 flags, info;
bool is_ip4, is_ip6;
bool is_tcp, is_udp;
bool ip_csum_ok, tcpudp_csum_ok;
rxd = (void *)skb->data - sizeof(*rxd);
flags = __le32_to_cpu(rxd->attention.flags);
info = __le32_to_cpu(rxd->msdu_start.info1);
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
if (!is_ip4 && !is_ip6)
return CHECKSUM_NONE;
if (!is_tcp && !is_udp)
return CHECKSUM_NONE;
if (!ip_csum_ok)
return CHECKSUM_NONE;
if (!tcpudp_csum_ok)
return CHECKSUM_NONE;
return CHECKSUM_UNNECESSARY;
}
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
@ -815,6 +846,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
u8 *fw_desc;
int i, j;
int ret;
int ip_summed;
memset(&info, 0, sizeof(info));
@ -889,6 +921,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
/* The skb is not yet processed and it may be
* reallocated. Since the offload is in the original
* skb extract the checksum now and assign it later */
ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@ -914,6 +951,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
info.skb->ip_summed = ip_summed;
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
info.skb->data, info.skb->len);
ath10k_process_rx(htt->ar, &info);
@ -980,6 +1019,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
info.status = HTT_RX_IND_MPDU_STATUS_OK;
info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
if (tkip_mic_err) {
ath10k_warn("tkip mic error\n");


@ -465,6 +465,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;


@ -1406,9 +1406,9 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
return;
qos_ctl = ieee80211_get_qos_ctl(hdr);
memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
skb->len - ieee80211_hdrlen(hdr->frame_control));
skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
memmove(skb->data + IEEE80211_QOS_CTL_LEN,
skb->data, (void *)qos_ctl - (void *)skb->data);
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
}
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@ -1925,6 +1925,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
memset(arvif, 0, sizeof(*arvif));
arvif->ar = ar;
arvif->vif = vif;
@ -2338,6 +2340,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.ssids[i].len = req->ssids[i].ssid_len;
arg.ssids[i].ssid = req->ssids[i].ssid;
}
} else {
arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->n_channels) {
@ -2934,6 +2938,41 @@ static void ath10k_restart_complete(struct ieee80211_hw *hw)
mutex_unlock(&ar->conf_mutex);
}
static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct ath10k *ar = hw->priv;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey = &ar->survey[idx];
int ret = 0;
mutex_lock(&ar->conf_mutex);
sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
}
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
spin_unlock_bh(&ar->data_lock);
survey->channel = &sband->channels[idx];
exit:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@ -2955,6 +2994,7 @@ static const struct ieee80211_ops ath10k_ops = {
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
.restart_complete = ath10k_restart_complete,
.get_survey = ath10k_get_survey,
#ifdef CONFIG_PM
.suspend = ath10k_suspend,
.resume = ath10k_resume,
@ -3076,9 +3116,15 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
.max = 8,
.types = BIT(NL80211_IFTYPE_STATION)
| BIT(NL80211_IFTYPE_P2P_CLIENT)
| BIT(NL80211_IFTYPE_P2P_GO)
| BIT(NL80211_IFTYPE_AP)
}
},
{
.max = 3,
.types = BIT(NL80211_IFTYPE_P2P_GO)
},
{
.max = 7,
.types = BIT(NL80211_IFTYPE_AP)
},
};
static const struct ieee80211_iface_combination ath10k_if_comb = {
@ -3093,19 +3139,18 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 mcs_map;
int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->vht_cap_info;
/* FIXME: check dynamically how many streams board supports */
mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
mcs_map = 0;
for (i = 0; i < 8; i++) {
if (i < ar->num_rf_chains)
mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
else
mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
}
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
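
The MCS map packs a 2-bit capability per spatial stream, with stream i
occupying bits 2i..2i+1. A worked example for a board reporting
num_rf_chains = 2, using the ieee80211.h values
IEEE80211_VHT_MCS_SUPPORT_0_9 = 2 and IEEE80211_VHT_MCS_NOT_SUPPORTED = 3:

/* streams 0-1 support MCS 0-9, streams 2-7 are absent:
 *   mcs_map  = (2 << 0) | (2 << 2)                  -> 0x000A
 *   mcs_map |= (3 << 4) | (3 << 6) | (3 << 8) |
 *              (3 << 10) | (3 << 12) | (3 << 14)    -> 0xFFFA total
 */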
@ -3168,7 +3213,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
for (i = 0; i < ar->num_rf_chains; i++)
ht_cap.mcs.rx_mask[i] = 0xFF;
ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
@ -3310,6 +3355,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations = 1;
ar->hw->netdev_features = NETIF_F_HW_CSUM;
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {


@ -32,7 +32,7 @@
#include "ce.h"
#include "pci.h"
unsigned int ath10k_target_ps;
static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
@ -56,6 +56,8 @@ static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
@ -1254,10 +1256,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
}
}
static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i;
for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
disable_irq(ar_pci->pdev->irq + i);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
* by ath10k_pci_start_intr(). */
ath10k_pci_disable_irqs(ar);
ath10k_pci_stop_ce(ar);
/* At this point, asynchronous threads are stopped, the target should
@ -1267,6 +1284,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_process_ce(ar);
ath10k_pci_cleanup_ce(ar);
ath10k_pci_buffer_cleanup(ar);
ar_pci->started = 0;
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@ -1740,8 +1759,15 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
ret = ath10k_pci_start_intr(ar);
if (ret) {
ath10k_err("could not start interrupt handling (%d)\n", ret);
goto err;
}
/*
* Bring the target up cleanly.
*
@ -1756,15 +1782,11 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
ret = ath10k_pci_reset_target(ar);
if (ret)
goto err;
goto err_irq;
if (ath10k_target_ps) {
ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
} else {
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
/* Force AWAKE forever */
ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
ath10k_do_pci_wake(ar);
}
ret = ath10k_pci_ce_init(ar);
if (ret)
@ -1785,16 +1807,22 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
err_ce:
ath10k_pci_ce_deinit(ar);
err_ps:
if (!ath10k_target_ps)
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
err_irq:
ath10k_pci_stop_intr(ar);
err:
return ret;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
ath10k_pci_stop_intr(ar);
ath10k_pci_ce_deinit(ar);
if (!ath10k_target_ps)
if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
}
@ -1990,8 +2018,13 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
ath10k_pci_msi_fw_handler,
IRQF_SHARED, "ath10k_pci", ar);
if (ret)
if (ret) {
ath10k_warn("request_irq(%d) failed %d\n",
ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
pci_disable_msi(ar_pci->pdev);
return ret;
}
for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
ret = request_irq(ar_pci->pdev->irq + i,
@ -2239,6 +2272,9 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
break;
case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
break;
}
}
}
@ -2274,6 +2310,9 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
if (ath10k_target_ps)
set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
ath10k_pci_dump_features(ar_pci);
ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
@ -2358,22 +2397,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->cacheline_sz = dma_get_cache_alignment();
ret = ath10k_pci_start_intr(ar);
if (ret) {
ath10k_err("could not start interrupt handling (%d)\n", ret);
goto err_iomap;
}
ret = ath10k_core_register(ar);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
goto err_intr;
goto err_iomap;
}
return 0;
err_intr:
ath10k_pci_stop_intr(ar);
err_iomap:
pci_iounmap(pdev, mem);
err_master:
@ -2410,7 +2441,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
tasklet_kill(&ar_pci->msi_fw_err);
ath10k_core_unregister(ar);
ath10k_pci_stop_intr(ar);
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ar_pci->mem);


@ -153,6 +153,7 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
@ -335,20 +336,22 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
extern unsigned int ath10k_target_ps;
void ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
static inline void ath10k_pci_wake(struct ath10k *ar)
{
if (ath10k_target_ps)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_wake(ar);
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
{
if (ath10k_target_ps)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
}


@ -390,9 +390,82 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
static int freq_to_idx(struct ath10k *ar, int freq)
{
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
sband = ar->hw->wiphy->bands[band];
if (!sband)
continue;
for (ch = 0; ch < sband->n_channels; ch++, idx++)
if (sband->channels[ch].center_freq == freq)
goto exit;
}
exit:
return idx;
}
static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
struct wmi_chan_info_event *ev;
struct survey_info *survey;
u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
int idx;
ev = (struct wmi_chan_info_event *)skb->data;
err_code = __le32_to_cpu(ev->err_code);
freq = __le32_to_cpu(ev->freq);
cmd_flags = __le32_to_cpu(ev->cmd_flags);
noise_floor = __le32_to_cpu(ev->noise_floor);
rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
cycle_count = __le32_to_cpu(ev->cycle_count);
ath10k_dbg(ATH10K_DBG_WMI,
"chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
err_code, freq, cmd_flags, noise_floor, rx_clear_count,
cycle_count);
spin_lock_bh(&ar->data_lock);
if (!ar->scan.in_progress) {
ath10k_warn("chan info event without a scan request?\n");
goto exit;
}
idx = freq_to_idx(ar, freq);
if (idx >= ARRAY_SIZE(ar->survey)) {
ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
freq, idx);
goto exit;
}
if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
/* During scanning chan info is reported twice for each
* visited channel. The reported cycle count is global
* and per-channel cycle count must be calculated */
cycle_count -= ar->survey_last_cycle_count;
rx_clear_count -= ar->survey_last_rx_clear_count;
survey = &ar->survey[idx];
survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
survey->noise = noise_floor;
survey->filled = SURVEY_INFO_CHANNEL_TIME |
SURVEY_INFO_CHANNEL_TIME_RX |
SURVEY_INFO_NOISE_DBM;
}
ar->survey_last_rx_clear_count = rx_clear_count;
ar->survey_last_cycle_count = cycle_count;
exit:
spin_unlock_bh(&ar->data_lock);
}
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
@ -868,6 +941,13 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
}
ar->ath_common.regulatory.current_rd =
__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
@ -892,7 +972,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
}
ath10k_dbg(ATH10K_DBG_WMI,
"wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
"wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->sw_version_1),
__le32_to_cpu(ev->abi_version),
@ -901,7 +981,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
__le32_to_cpu(ev->vht_cap_info),
__le32_to_cpu(ev->vht_supp_mcs),
__le32_to_cpu(ev->sys_cap_info),
__le32_to_cpu(ev->num_mem_reqs));
__le32_to_cpu(ev->num_mem_reqs),
__le32_to_cpu(ev->num_rf_chains));
complete(&ar->wmi.service_ready);
}


@ -2931,6 +2931,11 @@ struct wmi_chan_info_event {
__le32 cycle_count;
} __packed;
#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
/* FIXME: empirically extrapolated */
#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
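
Tying the wmi.c handler and this macro together: the firmware reports
cumulative counters, so per-channel airtime is the delta between two
consecutive chan-info events for the same channel, converted with the
(empirically extrapolated, per the FIXME) cycles-per-millisecond divisor.
With hypothetical numbers:

/* previous event: cycle_count = 10,000,000
 * current event:  cycle_count = 17,659,500 (WMI_CHAN_INFO_FLAG_COMPLETE)
 *
 * delta = 17,659,500 - 10,000,000 = 7,659,500 cycles
 * WMI_CHAN_INFO_MSEC(delta) = 7,659,500 / 76,595 = 100 ms on channel
 */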


@ -29,6 +29,7 @@
#include <linux/average.h>
#include <linux/leds.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
/* RX/TX descriptor hw structs
* TODO: Driver part should only see sw structs */


@ -56,6 +56,7 @@
#include <linux/etherdevice.h>
#include <linux/nl80211.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
@ -165,28 +166,36 @@ static const struct ieee80211_rate ath5k_rates[] = {
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60,
.hw_value = ATH5K_RATE_CODE_6M,
.flags = 0 },
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 90,
.hw_value = ATH5K_RATE_CODE_9M,
.flags = 0 },
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 120,
.hw_value = ATH5K_RATE_CODE_12M,
.flags = 0 },
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 180,
.hw_value = ATH5K_RATE_CODE_18M,
.flags = 0 },
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 240,
.hw_value = ATH5K_RATE_CODE_24M,
.flags = 0 },
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 360,
.hw_value = ATH5K_RATE_CODE_36M,
.flags = 0 },
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 480,
.hw_value = ATH5K_RATE_CODE_48M,
.flags = 0 },
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
.flags = 0 },
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@ -435,11 +444,27 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
* Called with ah->lock.
*/
int
ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
{
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"channel set, resetting (%u -> %u MHz)\n",
ah->curchan->center_freq, chan->center_freq);
ah->curchan->center_freq, chandef->chan->center_freq);
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20:
case NL80211_CHAN_WIDTH_20_NOHT:
ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
break;
case NL80211_CHAN_WIDTH_5:
ah->ah_bwmode = AR5K_BWMODE_5MHZ;
break;
case NL80211_CHAN_WIDTH_10:
ah->ah_bwmode = AR5K_BWMODE_10MHZ;
break;
default:
WARN_ON(1);
return -EINVAL;
}
/*
* To switch channels clear any pending DMA operations;
@ -447,7 +472,7 @@ ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
return ath5k_reset(ah, chan, true);
return ath5k_reset(ah, chandef->chan, true);
}
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@ -1400,6 +1425,16 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
switch (ah->ah_bwmode) {
case AR5K_BWMODE_5MHZ:
rxs->flag |= RX_FLAG_5MHZ;
break;
case AR5K_BWMODE_10MHZ:
rxs->flag |= RX_FLAG_10MHZ;
break;
default:
break;
}
if (rxs->rate_idx >= 0 && rs->rs_rate ==
ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
@ -2507,6 +2542,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
/* SW support for IBSS_RSN is provided by mac80211 */
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
/* both antennas can be configured as RX or TX */
hw->wiphy->available_antennas_tx = 0x3;
hw->wiphy->available_antennas_rx = 0x3;


@ -101,7 +101,7 @@ void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
struct ieee80211_vif *vif);
int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
int ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef);
void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,


@ -245,9 +245,11 @@ static ssize_t write_file_beacon(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "disable", 7) == 0) {
AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
pr_info("debugfs disable beacons\n");
@ -345,9 +347,11 @@ static ssize_t write_file_debug(struct file *file,
unsigned int i;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
if (strncmp(buf, dbg_info[i].name,
strlen(dbg_info[i].name)) == 0) {
@ -448,9 +452,11 @@ static ssize_t write_file_antenna(struct file *file,
unsigned int i;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "diversity", 9) == 0) {
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
pr_info("debug: enable diversity\n");
@ -619,9 +625,11 @@ static ssize_t write_file_frameerrors(struct file *file,
struct ath5k_statistics *st = &ah->stats;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "clear", 5) == 0) {
st->rxerr_crc = 0;
st->rxerr_phy = 0;
@ -766,9 +774,11 @@ static ssize_t write_file_ani(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "sens-low", 8) == 0) {
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
} else if (strncmp(buf, "sens-high", 9) == 0) {
@ -862,9 +872,11 @@ static ssize_t write_file_queue(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "start", 5) == 0)
ieee80211_wake_queues(ah->hw);
else if (strncmp(buf, "stop", 4) == 0)


@ -202,7 +202,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ah->lock);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ret = ath5k_chan_set(ah, conf->chandef.chan);
ret = ath5k_chan_set(ah, &conf->chandef);
if (ret < 0)
goto unlock;
}


@ -144,11 +144,13 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
sifs = AR5K_INIT_SIFS_HALF_RATE;
preamble *= 2;
sym_time *= 2;
bitrate = DIV_ROUND_UP(bitrate, 2);
break;
case AR5K_BWMODE_5MHZ:
sifs = AR5K_INIT_SIFS_QUARTER_RATE;
preamble *= 4;
sym_time *= 4;
bitrate = DIV_ROUND_UP(bitrate, 4);
break;
default:
sifs = AR5K_INIT_SIFS_DEFAULT_BG;
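
The added bitrate scaling completes the half/quarter-rate duration math:
in 10 MHz (half) and 5 MHz (quarter) channels, preamble and symbol times
stretch by the same factor that the effective bitrate shrinks. A
quarter-rate example (bitrates are stored in 100 kbit/s units):

/* nominal 24 Mbit/s rate under AR5K_BWMODE_5MHZ:
 *   preamble *= 4;  sym_time *= 4;
 *   bitrate = DIV_ROUND_UP(240, 4) = 60   -> effective 6 Mbit/s
 */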


@ -566,9 +566,11 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
u32 rate_flags, i;
if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
@ -605,7 +607,28 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
else
band = IEEE80211_BAND_2GHZ;
rate = &ah->sbands[band].bitrates[0];
switch (ah->ah_bwmode) {
case AR5K_BWMODE_5MHZ:
rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
break;
case AR5K_BWMODE_10MHZ:
rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
break;
default:
rate_flags = 0;
break;
}
sband = &ah->sbands[band];
rate = NULL;
for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
rate = &sband->bitrates[i];
break;
}
if (WARN_ON(!rate))
return -EINVAL;
ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
/* ack_tx_time includes an SIFS already */


@ -1836,6 +1836,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
clear_bit(WMI_READY, &ar->flag);
if (ar->fw_recovery.enable)
del_timer_sync(&ar->fw_recovery.hb_timer);
/*
* After wmi_shudown all WMI events will be dropped. We
* need to cleanup the buffers allocated in AP mode and


@ -29,6 +29,9 @@ struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
struct ath6kl_sta *conn = NULL;
u8 i, max_conn;
if (is_zero_ether_addr(node_addr))
return NULL;
max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
for (i = 0; i < max_conn; i++) {


@ -66,7 +66,8 @@ void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
ath6kl_warn("nla_put failed on testmode rx skb!\n");
}
int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
void *data, int len)
{
struct ath6kl *ar = wiphy_priv(wiphy);
struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];


@ -20,7 +20,8 @@
#ifdef CONFIG_NL80211_TESTMODE
void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len);
int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
void *data, int len);
#else
@ -29,7 +30,9 @@ static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf,
{
}
static inline int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
static inline int ath6kl_tm_cmd(struct wiphy *wiphy,
struct wireless_dev *wdev,
void *data, int len)
{
return 0;
}


@ -568,8 +568,8 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
cfg80211_rx_mgmt(&vif->wdev, freq, 0,
ev->data, dlen, GFP_ATOMIC);
cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0,
GFP_ATOMIC);
return 0;
}
@ -608,8 +608,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
cfg80211_rx_mgmt(&vif->wdev, freq, 0,
ev->data, dlen, GFP_ATOMIC);
cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, GFP_ATOMIC);
return 0;
}


@ -56,7 +56,7 @@ config ATH9K_AHB
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
depends on ATH9K
depends on ATH9K && DEBUG_FS
select MAC80211_DEBUGFS
select RELAY
---help---


@ -319,9 +319,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ah->ani_function = 0;
}
/* always allow mode (on/off) to be controlled */
ah->ani_function |= ATH9K_ANI_MODE;
ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
aniState->ofdmNoiseImmunityLevel);
cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,


@ -48,15 +48,10 @@
/* values here are relative to the INI */
enum ath9k_ani_cmd {
ATH9K_ANI_PRESENT = 0x1,
ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
ATH9K_ANI_MODE = 0x40,
ATH9K_ANI_PHYERR_RESET = 0x80,
ATH9K_ANI_MRC_CCK = 0x100,
ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x1,
ATH9K_ANI_FIRSTEP_LEVEL = 0x2,
ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x4,
ATH9K_ANI_MRC_CCK = 0x8,
ATH9K_ANI_ALL = 0xfff
};


@ -1160,8 +1160,6 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
*/
WARN_ON(1);
break;
case ATH9K_ANI_PRESENT:
break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;


@ -269,13 +269,12 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
val |= AR_WA_D3_L1_DISABLE;
} else {
if (((AR_SREV_9285(ah) ||
AR_SREV_9271(ah) ||
AR_SREV_9287(ah)) &&
(AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
(AR_SREV_9280(ah) &&
(AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
val |= AR_WA_D3_L1_DISABLE;
if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
if (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
val |= AR_WA_D3_L1_DISABLE;
} else if (AR_SREV_9280(ah)) {
if (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
val |= AR_WA_D3_L1_DISABLE;
}
}
@ -297,24 +296,18 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
} else {
if (ah->config.pcie_waen) {
val = ah->config.pcie_waen;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
val &= (~AR_WA_D3_L1_DISABLE);
} else {
if (AR_SREV_9285(ah) ||
AR_SREV_9271(ah) ||
AR_SREV_9287(ah)) {
if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
val = AR9285_WA_DEFAULT;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
}
else if (AR_SREV_9280(ah)) {
val &= (~AR_WA_D3_L1_DISABLE);
} else if (AR_SREV_9280(ah)) {
/*
* For AR9280 chips, bit 22 of 0x4004
* needs to be set.
*/
val = AR9280_WA_DEFAULT;
if (!power_off)
val &= (~AR_WA_D3_L1_DISABLE);
val &= (~AR_WA_D3_L1_DISABLE);
} else {
val = AR_WA_DEFAULT;
}


@ -3615,8 +3615,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
if (AR_SREV_9485(ah) && common->bt_ant_diversity) {
regval &= ~AR_SWITCH_TABLE_COM2_ALL;
regval |= ah->config.ant_ctrl_comm2g_switch_enable;
value &= ~AR_SWITCH_TABLE_COM2_ALL;
value |= ah->config.ant_ctrl_comm2g_switch_enable;
}
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
@ -3825,6 +3825,11 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
else
value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
if (ah->config.alt_mingainidx)
REG_RMW_FIELD(ah, AR_PHY_EXT_ATTEN_CTL_0,
AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
value);
REG_RMW_FIELD(ah, ext_atten_reg[i],
AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
value);


@ -153,7 +153,7 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
if (!ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
ar9340_1p0_radio_core_40M);
} else if (AR_SREV_9485_11(ah)) {
} else if (AR_SREV_9485_11_OR_LATER(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
ar9485_1_1_mac_core);
@ -424,7 +424,7 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_lowest_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485_modes_lowest_ob_db_tx_gain_1_1);
else if (AR_SREV_9550(ah))
@ -458,7 +458,7 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_high_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_high_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@ -492,7 +492,7 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_low_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_low_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@ -517,7 +517,7 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_high_power_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_high_power_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@ -552,7 +552,7 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
{
if (AR_SREV_9485_11(ah))
if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_green_ob_db_tx_gain_1_1);
else if (AR_SREV_9340(ah))
@ -571,7 +571,7 @@ static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_green_spur_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@ -611,7 +611,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9340Common_rx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485_common_rx_gain_1_1);
else if (AR_SREV_9550(ah)) {
@ -644,7 +644,7 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9340Common_wo_xlna_rx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485Common_wo_xlna_rx_gain_1_1);
else if (AR_SREV_9462_21(ah))
@ -745,16 +745,25 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
bool power_off)
{
/*
* Increase L1 Entry Latency. Some WB222 boards don't have
* this change in eeprom/OTP.
*
*/
if (AR_SREV_9462(ah)) {
u32 val = ah->config.aspm_l1_fix;
if ((val & 0xff000000) == 0x17000000) {
val &= 0x00ffffff;
val |= 0x27000000;
REG_WRITE(ah, 0x570c, val);
}
}
/* Nothing to do on restore for 11N */
if (!power_off /* !restore */) {
/* set bit 19 to allow forcing of pcie core into L1 state */
REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
/* Several PCIe massages to ensure proper behaviour */
if (ah->config.pcie_waen)
REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
else
REG_WRITE(ah, AR_WA, ah->WARegVal);
REG_WRITE(ah, AR_WA, ah->WARegVal);
}
/*


@ -491,6 +491,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
rxs->rs_firstaggr = (rxsp->status11 & AR_RxFirstAggr) ? 1 : 0;
rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);


@ -1172,6 +1172,10 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* is_on == 0 means MRC CCK is OFF (more noise imm)
*/
bool is_on = param ? 1 : 0;
if (ah->caps.rx_chainmask == 1)
break;
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_ENABLE, is_on);
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
@ -1189,8 +1193,6 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
}
break;
}
case ATH9K_ANI_PRESENT:
break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
@ -1445,7 +1447,7 @@ static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
if (AR_SREV_9485_11(ah)) {
if (AR_SREV_9485_11_OR_LATER(ah)) {
/*
* Enable LNA diversity.
*/


@ -148,6 +148,8 @@
#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
#define AR_PHY_EXT_CCA_THRESH62_S 16
#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX 0x0000FF00
#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX_S 8
#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_EXT_MINCCA_PWR_S 16
#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L


@ -72,17 +72,12 @@ struct ath_config {
/*************************/
#define ATH_TXBUF_RESET(_bf) do { \
(_bf)->bf_stale = false; \
(_bf)->bf_lastbf = NULL; \
(_bf)->bf_next = NULL; \
memset(&((_bf)->bf_state), 0, \
sizeof(struct ath_buf_state)); \
} while (0)
#define ATH_RXBUF_RESET(_bf) do { \
(_bf)->bf_stale = false; \
} while (0)
/**
* enum buffer_type - Buffer type flags
*
@ -196,10 +191,10 @@ struct ath_txq {
struct ath_atx_ac {
struct ath_txq *txq;
int sched;
struct list_head list;
struct list_head tid_q;
bool clear_ps_filter;
bool sched;
};
struct ath_frame_info {
@ -216,6 +211,7 @@ struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
u8 ndelim;
bool stale;
u16 seqno;
unsigned long bfs_paprd_timestamp;
};
@ -229,7 +225,6 @@ struct ath_buf {
void *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
bool bf_stale;
struct ieee80211_tx_rate rates[4];
struct ath_buf_state bf_state;
};
@ -241,13 +236,14 @@ struct ath_atx_tid {
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
int bar_index;
u16 seq_start;
u16 seq_next;
u16 baw_size;
int tidno;
u8 tidno;
int baw_head; /* first un-acked tx buffer */
int baw_tail; /* next unused tx buffer slot */
s8 bar_index;
bool sched;
bool paused;
bool active;
@ -259,17 +255,13 @@ struct ath_node {
struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
struct ath_atx_ac ac[IEEE80211_NUM_ACS];
int ps_key;
u16 maxampdu;
u8 mpdudensity;
s8 ps_key;
bool sleeping;
bool no_ps_filter;
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
struct dentry *node_stat;
#endif
};
struct ath_tx_control {
@ -315,6 +307,7 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
struct ath_buf *buf_hold;
struct sk_buff *frag;
u32 ampdu_ref;
@ -427,6 +420,7 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
bool ath9k_csa_is_finished(struct ath_softc *sc);
/*******************/
/* Link Monitoring */
@ -637,6 +631,7 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
#define ATH9K_PCI_CUS217 0x0004
#define ATH9K_PCI_WOW 0x0008
#define ATH9K_PCI_BT_ANT_DIV 0x0010
#define ATH9K_PCI_D3_L1_WAR 0x0020
/*
* Default cache line size, in bytes.
@ -763,6 +758,7 @@ struct ath_softc {
#endif
struct ath_descdma txsdma;
struct ieee80211_vif *csa_vif;
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;


@ -291,6 +291,23 @@ void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
(unsigned long long)tsfadjust, avp->av_bslot);
}
bool ath9k_csa_is_finished(struct ath_softc *sc)
{
struct ieee80211_vif *vif;
vif = sc->csa_vif;
if (!vif || !vif->csa_active)
return false;
if (!ieee80211_csa_is_complete(vif))
return false;
ieee80211_csa_finish(vif);
sc->csa_vif = NULL;
return true;
}
void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
@ -336,6 +353,10 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
}
/* EDMA devices check that in the tx completion function. */
if (!edma && ath9k_csa_is_finished(sc))
return;
slot = ath9k_beacon_choose_slot(sc);
vif = sc->beacon.bslot[slot];
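
For context on the channel-switch wiring: mac80211 counts the CSA beacons
down, and once ieee80211_csa_is_complete() reports the countdown finished,
the driver must call ieee80211_csa_finish() once to perform the actual
switch. That is what the helper above does on each beacon (or from the
tx-completion path on EDMA chips):

/* Per-beacon CSA sequence, as wired up above:
 *   beacon tasklet / EDMA tx completion
 *     -> ath9k_csa_is_finished(sc)
 *          -> ieee80211_csa_is_complete(vif)   countdown reached zero?
 *          -> ieee80211_csa_finish(vif)        tell mac80211 to switch
 */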


@ -49,37 +49,40 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
}
EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
{
u32 chanmode = 0;
switch (chan->band) {
switch (chandef->chan->band) {
case IEEE80211_BAND_2GHZ:
switch (channel_type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
chanmode = CHANNEL_G_HT20;
break;
case NL80211_CHAN_HT40PLUS:
chanmode = CHANNEL_G_HT40PLUS;
case NL80211_CHAN_WIDTH_40:
if (chandef->center_freq1 > chandef->chan->center_freq)
chanmode = CHANNEL_G_HT40PLUS;
else
chanmode = CHANNEL_G_HT40MINUS;
break;
case NL80211_CHAN_HT40MINUS:
chanmode = CHANNEL_G_HT40MINUS;
default:
break;
}
break;
case IEEE80211_BAND_5GHZ:
switch (channel_type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
chanmode = CHANNEL_A_HT20;
break;
case NL80211_CHAN_HT40PLUS:
chanmode = CHANNEL_A_HT40PLUS;
case NL80211_CHAN_WIDTH_40:
if (chandef->center_freq1 > chandef->chan->center_freq)
chanmode = CHANNEL_A_HT40PLUS;
else
chanmode = CHANNEL_A_HT40MINUS;
break;
case NL80211_CHAN_HT40MINUS:
chanmode = CHANNEL_A_HT40MINUS;
default:
break;
}
break;
@ -94,13 +97,12 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
* Update internal channel flags.
*/
void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
struct cfg80211_chan_def *chandef)
{
ichan->channel = chan->center_freq;
ichan->chan = chan;
ichan->channel = chandef->chan->center_freq;
ichan->chan = chandef->chan;
if (chan->band == IEEE80211_BAND_2GHZ) {
if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
ichan->chanmode = CHANNEL_G;
ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
} else {
@ -108,8 +110,22 @@ void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
}
if (channel_type != NL80211_CHAN_NO_HT)
ichan->chanmode = ath9k_get_extchanmode(chan, channel_type);
switch (chandef->width) {
case NL80211_CHAN_WIDTH_5:
ichan->channelFlags |= CHANNEL_QUARTER;
break;
case NL80211_CHAN_WIDTH_10:
ichan->channelFlags |= CHANNEL_HALF;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
break;
case NL80211_CHAN_WIDTH_20:
case NL80211_CHAN_WIDTH_40:
ichan->chanmode = ath9k_get_extchanmode(chandef);
break;
default:
WARN_ON(1);
}
}
EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
@ -125,8 +141,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
chan_idx = curchan->hw_value;
channel = &ah->channels[chan_idx];
ath9k_cmn_update_ichannel(channel, curchan,
cfg80211_get_chandef_type(&hw->conf.chandef));
ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
return channel;
}
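
The HT40 orientation now comes from the chandef rather than the old
NL80211_CHAN_HT40PLUS/MINUS channel types; the rule used above is:

/* center_freq1 > chan->center_freq -> secondary channel above (HT40+)
 * center_freq1 < chan->center_freq -> secondary channel below (HT40-)
 * e.g. channel 36 (5180 MHz) with center_freq1 = 5190 MHz is HT40+.
 */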


@ -44,8 +44,7 @@
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type);
struct cfg80211_chan_def *chandef);
struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
struct ath_hw *ah);
int ath9k_cmn_count_streams(unsigned int chainmask, int max);


@ -88,90 +88,6 @@ static const struct file_operations fops_debug = {
#define DMA_BUF_LEN 1024
static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
char buf[32];
unsigned int len;
len = sprintf(buf, "0x%08x\n", ah->txchainmask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
unsigned long mask;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &mask))
return -EINVAL;
ah->txchainmask = mask;
ah->caps.tx_chainmask = mask;
return count;
}
static const struct file_operations fops_tx_chainmask = {
.read = read_file_tx_chainmask,
.write = write_file_tx_chainmask,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
char buf[32];
unsigned int len;
len = sprintf(buf, "0x%08x\n", ah->rxchainmask);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
unsigned long mask;
char buf[32];
ssize_t len;
len = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
buf[len] = '\0';
if (kstrtoul(buf, 0, &mask))
return -EINVAL;
ah->rxchainmask = mask;
ah->caps.rx_chainmask = mask;
return count;
}
static const struct file_operations fops_rx_chainmask = {
.read = read_file_rx_chainmask,
.write = write_file_rx_chainmask,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@ -1725,17 +1641,7 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
an->node_stat = debugfs_create_file("node_stat", S_IRUGO,
dir, an, &fops_node_stat);
}
void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
debugfs_remove(an->node_stat);
debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat);
}
/* Ethtool support for get-stats */
@ -1906,10 +1812,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_reset);
debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_recv);
debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
&ah->rxchainmask);
debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
&ah->txchainmask);
debugfs_create_file("ani", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_ani);
debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,

View File

@ -292,10 +292,6 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
void ath_debug_send_fft_sample(struct ath_softc *sc,
struct fft_sample_tlv *fft_sample);
void ath9k_debug_stat_ant(struct ath_softc *sc,

View File

@ -115,10 +115,10 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
cmd->skb = skb;
cmd->hif_dev = hif_dev;
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
usb_fill_int_urb(urb, hif_dev->udev,
usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
skb->data, skb->len,
hif_usb_regout_cb, cmd);
hif_usb_regout_cb, cmd, 1);
usb_anchor_urb(urb, &hif_dev->regout_submitted);
ret = usb_submit_urb(urb, GFP_KERNEL);
@ -723,11 +723,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, nskb);
ath9k_hif_usb_reg_in_cb, nskb, 1);
}
resubmit:
@ -909,11 +909,11 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
usb_fill_bulk_urb(urb, hif_dev->udev,
usb_rcvbulkpipe(hif_dev->udev,
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
ath9k_hif_usb_reg_in_cb, skb);
ath9k_hif_usb_reg_in_cb, skb, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@ -1031,9 +1031,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
{
struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
struct usb_endpoint_descriptor *endp;
int ret, idx;
int ret;
ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret) {
@ -1043,20 +1041,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
return ret;
}
/* On downloading the firmware to the target, the USB descriptor of EP4
* is 'patched' to change the type of the endpoint to Bulk. This will
* bring down CPU usage during the scan period.
*/
for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
endp = &alt->endpoint[idx].desc;
if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_INT) {
endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
endp->bInterval = 0;
}
}
/* Alloc URBs */
ret = ath9k_hif_usb_alloc_urbs(hif_dev);
if (ret) {
@ -1268,7 +1252,7 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
if (!buf)
return;
ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE),
buf, 4, NULL, HZ);
if (ret)
dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
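These hunks convert the ath9k_htc register pipe from bulk to interrupt transfers. For readers comparing the two USB helpers: usb_fill_int_urb() takes the same arguments as usb_fill_bulk_urb() plus a trailing polling interval, which the patch passes as 1. A minimal illustrative wrapper (the helper name is hypothetical, not part of the driver):

#include <linux/usb.h>

/* Illustrative only: highlights the extra bInterval argument that
 * usb_fill_int_urb() takes compared to usb_fill_bulk_urb(). */
static void fill_reg_out_urb(struct urb *urb, struct usb_device *udev,
			     unsigned int ep, void *buf, int len,
			     usb_complete_t cb, void *ctx)
{
	usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, ep),
			 buf, len, cb, ctx, 1 /* bInterval */);
}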

View File

@ -1203,16 +1203,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&hw->conf.chandef);
int pos = curchan->hw_value;
ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
hw->conf.chandef.chan,
channel_type);
&hw->conf.chandef);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");

View File

@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
struct ieee80211_conf *cur_conf = &priv->hw->conf;
bool txok;
int slot;
int hdrlen, padsize;
slot = strip_drv_header(priv, skb);
if (slot < 0) {
@ -504,6 +505,15 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
ath9k_htc_tx_clear_slot(priv, slot);
/* Remove padding before handing frame back to mac80211 */
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
padsize = hdrlen & 3;
if (padsize && skb->len > hdrlen + padsize) {
memmove(skb->data + padsize, skb->data, hdrlen);
skb_pull(skb, padsize);
}
/* Send status to mac80211 */
ieee80211_tx_status(priv->hw, skb);
}

View File

@ -450,7 +450,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
@ -575,18 +574,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
* We need to do this to avoid RMW of this register. We cannot
* read the reg when chip is asleep.
*/
ah->WARegVal = REG_READ(ah, AR_WA);
ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
AR_WA_ASPM_TIMER_BASED_DISABLE);
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->WARegVal = REG_READ(ah, AR_WA);
ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
AR_WA_ASPM_TIMER_BASED_DISABLE);
}
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_err(common, "Couldn't reset chip\n");
return -EIO;
}
if (AR_SREV_9462(ah))
ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
if (AR_SREV_9565(ah)) {
ah->WARegVal |= AR_WA_BIT22;
REG_WRITE(ah, AR_WA, ah->WARegVal);
@ -656,8 +654,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_cal_settings(ah);
ah->ani_function = ATH9K_ANI_ALL;
if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
@ -1069,7 +1065,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 11;
sifstime *= 2;
sifstime = 32;
ack_offset = 16;
slottime = 13;
} else if (IS_CHAN_QUARTER_RATE(chan)) {
@ -1079,7 +1075,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 22;
sifstime *= 4;
sifstime = 64;
ack_offset = 32;
slottime = 21;
} else {
@ -1116,7 +1112,6 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
ctstimeout += 48 - sifstime - ah->slottime;
}
ath9k_hw_set_sifs_time(ah, sifstime);
ath9k_hw_setslottime(ah, slottime);
ath9k_hw_set_ack_timeout(ah, acktimeout);

View File

@ -311,9 +311,11 @@ struct ath9k_ops_config {
u16 ani_poll_interval; /* ANI poll interval in ms */
/* Platform specific config */
u32 aspm_l1_fix;
u32 xlna_gpio;
u32 ant_ctrl_comm2g_switch_enable;
bool xatten_margin_cfg;
bool alt_mingainidx;
};
enum ath9k_int {

View File

@ -146,14 +146,22 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(60, 0x0b, 0),
RATE(90, 0x0f, 0),
RATE(120, 0x0a, 0),
RATE(180, 0x0e, 0),
RATE(240, 0x09, 0),
RATE(360, 0x0d, 0),
RATE(480, 0x08, 0),
RATE(540, 0x0c, 0),
RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ)),
RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ)),
RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ)),
RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ)),
RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ)),
RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ)),
RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ)),
RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ)),
};
#ifdef CONFIG_MAC80211_LEDS
@ -526,6 +534,7 @@ static void ath9k_init_platform(struct ath_softc *sc)
ATH9K_PCI_CUS230)) {
ah->config.xlna_gpio = 9;
ah->config.xatten_margin_cfg = true;
ah->config.alt_mingainidx = true;
ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88;
sc->ant_comb.low_rssi_thresh = 20;
sc->ant_comb.fast_div_bias = 3;
@ -542,6 +551,11 @@ static void ath9k_init_platform(struct ath_softc *sc)
pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
ath_info(common, "Set BT/WLAN RX diversity capability\n");
}
if (sc->driver_data & ATH9K_PCI_D3_L1_WAR) {
ah->config.pcie_waen = 0x0040473b;
ath_info(common, "Enable WAR for ASPM D3/L1\n");
}
}
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
@ -726,13 +740,15 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct ath_hw *ah = sc->sc_ah;
struct cfg80211_chan_def chandef;
int i;
sband = &sc->sbands[band];
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
ath9k_cmn_update_ichannel(ah->curchan, &chandef);
ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
}
}
@ -818,7 +834,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE;
IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
@ -850,6 +867,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
#ifdef CONFIG_PM_SLEEP
if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&

View File

@ -41,7 +41,7 @@ void ath_tx_complete_poll_work(struct work_struct *work)
txq->axq_tx_inprogress = true;
}
}
ath_txq_unlock_complete(sc, txq);
ath_txq_unlock(sc, txq);
}
if (needreset) {

View File

@ -583,9 +583,9 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
rs->rs_moreaggr =
(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
/* directly mapped flags for ieee80211_rx_status */

View File

@ -140,6 +140,7 @@ struct ath_rx_status {
int8_t rs_rssi_ext1;
int8_t rs_rssi_ext2;
u8 rs_isaggr;
u8 rs_firstaggr;
u8 rs_moreaggr;
u8 rs_num_delims;
u8 rs_flags;
@ -569,6 +570,7 @@ struct ar5416_desc {
#define AR_RxAggr 0x00020000
#define AR_PostDelimCRCErr 0x00040000
#define AR_RxStatusRsvd71 0x3ff80000
#define AR_RxFirstAggr 0x20000000
#define AR_DecryptBusyErr 0x40000000
#define AR_KeyMiss 0x80000000

View File

@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc)
{
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) ||
AR_SREV_9550(sc->sc_ah))
if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
@ -1032,6 +1031,9 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
if (sc->csa_vif == vif)
sc->csa_vif = NULL;
ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
ath9k_ps_restore(sc);
@ -1201,8 +1203,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&conf->chandef);
int pos = curchan->hw_value;
int old_pos = -1;
unsigned long flags;
@ -1210,8 +1210,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ah->curchan)
old_pos = ah->curchan - &ah->channels[0];
ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
curchan->center_freq, channel_type);
ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
curchan->center_freq, hw->conf.chandef.width);
/* update survey stats for the old channel before switching */
spin_lock_irqsave(&common->cc_lock, flags);
@ -1219,7 +1219,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_irqrestore(&common->cc_lock, flags);
ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
curchan, channel_type);
&conf->chandef);
/*
* If the operating channel changes, change the survey in-use flags
@ -2320,6 +2320,19 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
clear_bit(SC_OP_SCANNING, &sc->sc_flags);
}
static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
struct ath_softc *sc = hw->priv;
/* mac80211 does not support CSA in multi-if cases (yet) */
if (WARN_ON(sc->csa_vif))
return;
sc->csa_vif = vif;
}
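The handler above only records which vif requested the switch; the beacon path completes it (see the ath9k_csa_is_finished() call added to the tx EDMA tasklet further below). A hedged sketch of what such a completion helper could look like, assuming mac80211's ieee80211_csa_finish(); the function name is illustrative, not the driver's actual implementation:

/* Hypothetical sketch: once the beacon carrying the final CSA count
 * has gone out, tell mac80211 to perform the actual channel switch. */
static void ath9k_csa_finish_sketch(struct ath_softc *sc)
{
	if (!sc->csa_vif)
		return;

	ieee80211_csa_finish(sc->csa_vif);
	sc->csa_vif = NULL;
}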
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@ -2364,8 +2377,8 @@ struct ieee80211_ops ath9k_ops = {
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
.sta_add_debugfs = ath9k_sta_add_debugfs,
.sta_remove_debugfs = ath9k_sta_remove_debugfs,
#endif
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.channel_switch_beacon = ath9k_channel_switch_beacon,
};

View File

@ -30,6 +30,52 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
PCI_VENDOR_ID_AZWAVE,
0x1C71),
.driver_data = ATH9K_PCI_D3_L1_WAR },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
PCI_VENDOR_ID_FOXCONN,
0xE01F),
.driver_data = ATH9K_PCI_D3_L1_WAR },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
0x11AD, /* LITEON */
0x6632),
.driver_data = ATH9K_PCI_D3_L1_WAR },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
0x11AD, /* LITEON */
0x6642),
.driver_data = ATH9K_PCI_D3_L1_WAR },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
PCI_VENDOR_ID_QMI,
0x0306),
.driver_data = ATH9K_PCI_D3_L1_WAR },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
0x185F, /* WNC */
0x309D),
.driver_data = ATH9K_PCI_D3_L1_WAR },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
0x10CF, /* Fujitsu */
0x147C),
.driver_data = ATH9K_PCI_D3_L1_WAR },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
0x10CF, /* Fujitsu */
0x147D),
.driver_data = ATH9K_PCI_D3_L1_WAR },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002A,
0x10CF, /* Fujitsu */
0x1536),
.driver_data = ATH9K_PCI_D3_L1_WAR },
/* AR9285 card for Asus */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x002B,
@ -59,6 +105,11 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
PCI_VENDOR_ID_AZWAVE,
0x2126),
.driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x126A),
.driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
/* PCI-E CUS230 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
@ -309,6 +360,22 @@ static void ath_pci_aspm_init(struct ath_common *common)
return;
}
/*
* 0x70c - Ack Frequency Register.
*
* Bits 27:29 - DEFAULT_L1_ENTRANCE_LATENCY.
*
* 000 : 1 us
* 001 : 2 us
* 010 : 4 us
* 011 : 8 us
* 100 : 16 us
* 101 : 32 us
* 110/111 : 64 us
*/
if (AR_SREV_9462(ah))
pci_read_config_dword(pdev, 0x70c, &ah->config.aspm_l1_fix);
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
ah->aspm_enabled = true;
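Per the 0x70c comment above, the three latency bits map onto microseconds as listed; a stand-alone sketch of that decode (hypothetical helper, not driver code):

#include <stdint.h>

/* Decode DEFAULT_L1_ENTRANCE_LATENCY (bits 29:27 of the 0x70c Ack
 * Frequency Register) into microseconds, per the table above. */
static unsigned int l1_entrance_latency_us(uint32_t ack_freq_reg)
{
	static const unsigned int us[8] = { 1, 2, 4, 8, 16, 32, 64, 64 };

	return us[(ack_freq_reg >> 27) & 0x7];
}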

View File

@ -1282,9 +1282,14 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_rate_priv *ath_rc_priv = priv_sta;
int i, j = 0;
u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
for (i = 0; i < sband->n_bitrates; i++) {
if (sta->supp_rates[sband->band] & BIT(i)) {
if ((rate_flags & sband->bitrates[i].flags)
!= rate_flags)
continue;
ath_rc_priv->neg_rates.rs_rates[j]
= (sband->bitrates[i].bitrate * 2) / 10;
j++;
@ -1326,8 +1331,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
ath_rc_init(sc, priv_sta);
ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
"Operating HT Bandwidth changed to: %d\n",
cfg80211_get_chandef_type(&sc->hw->conf.chandef));
"Operating Bandwidth changed to: %d\n",
sc->hw->conf.chandef.width);
}
}

View File

@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
struct ath_desc *ds;
struct sk_buff *skb;
ATH_RXBUF_RESET(bf);
ds = bf->bf_desc;
ds->ds_link = 0; /* link to null */
ds->ds_data = bf->bf_buf_addr;
@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
sc->rx.rxlink = &ds->ds_link;
}
static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
{
if (sc->rx.buf_hold)
ath_rx_buf_link(sc, sc->rx.buf_hold);
sc->rx.buf_hold = bf;
}
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
/* XXX block beacon interrupts */
@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
skb = bf->bf_mpdu;
ATH_RXBUF_RESET(bf);
memset(skb->data, 0, ah->caps.rx_status_len);
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
ah->caps.rx_status_len, DMA_TO_DEVICE);
@ -185,7 +190,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
skb_queue_head_init(&rx_edma->rx_fifo);
__skb_queue_head_init(&rx_edma->rx_fifo);
rx_edma->rx_fifo_hwsize = size;
}
@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
sc->rx.buf_hold = NULL;
sc->rx.rxlink = NULL;
list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
ath_rx_buf_link(sc, bf);
@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
}
bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
if (bf == sc->rx.buf_hold)
return NULL;
ds = bf->bf_desc;
/*
@ -755,7 +764,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
bool is_mc, is_valid_tkip, strip_mic, mic_error;
struct ath_hw *ah = common->ah;
__le16 fc;
u8 rx_status_len = ah->caps.rx_status_len;
fc = hdr->frame_control;
@ -777,25 +785,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
!test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
if (!rx_stats->rs_datalen) {
RX_STAT_INC(rx_len_err);
return false;
}
/*
* rs_status follows rs_datalen, so if rs_datalen is too large
* we can take it as a hint that the hardware corrupted it and
* ignore those frames.

*/
if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
RX_STAT_INC(rx_len_err);
return false;
}
/* Only use error bits from the last fragment */
if (rx_stats->rs_more)
return true;
mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
@ -814,8 +803,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
mic_error = false;
}
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
return false;
if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
(!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
@ -865,6 +852,17 @@ static int ath9k_process_rate(struct ath_common *common,
band = hw->conf.chandef.chan->band;
sband = hw->wiphy->bands[band];
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_5:
rxs->flag |= RX_FLAG_5MHZ;
break;
case NL80211_CHAN_WIDTH_10:
rxs->flag |= RX_FLAG_10MHZ;
break;
default:
break;
}
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
rxs->flag |= RX_FLAG_HT;
@ -898,129 +896,65 @@ static int ath9k_process_rate(struct ath_common *common,
static void ath9k_process_rssi(struct ath_common *common,
struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr,
struct ath_rx_status *rx_stats)
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
int rssi = rx_stats->rs_rssi;
if (!rx_stats->is_mybeacon ||
((ah->opmode != NL80211_IFTYPE_STATION) &&
(ah->opmode != NL80211_IFTYPE_ADHOC)))
/*
* RSSI is not available for subframes in an A-MPDU.
*/
if (rx_stats->rs_moreaggr) {
rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
return;
if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
last_rssi = sc->last_rssi;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
if (rssi < 0)
rssi = 0;
/* Update Beacon RSSI, this is used by ANI. */
ah->stats.avgbrssi = rssi;
}
/*
* For Decrypt or Demic errors, we only mark the packet status here and always
* push the frame up to let mac80211 handle the actual error case, be it a
* missing decryption key or a real decryption error. This lets us keep
* statistics there.
*/
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
struct ieee80211_hdr *hdr,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rx_status,
bool *decrypt_error)
{
struct ieee80211_hw *hw = sc->hw;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
bool discard_current = sc->rx.discard_next;
sc->rx.discard_next = rx_stats->rs_more;
if (discard_current)
return -EINVAL;
}
/*
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
* Check if the RSSI for the last subframe in an A-MPDU
* or an unaggregated frame is valid.
*/
if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
return -EINVAL;
if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
return;
}
/* Only use status info from the last fragment */
if (rx_stats->rs_more)
return 0;
/*
* Update Beacon RSSI, this is used by ANI.
*/
if (rx_stats->is_mybeacon &&
((ah->opmode == NL80211_IFTYPE_STATION) ||
(ah->opmode == NL80211_IFTYPE_ADHOC))) {
ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
last_rssi = sc->last_rssi;
if (ath9k_process_rate(common, hw, rx_stats, rx_status))
return -EINVAL;
if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
if (rssi < 0)
rssi = 0;
ath9k_process_rssi(common, hw, hdr, rx_stats);
ah->stats.avgbrssi = rssi;
}
rx_status->band = hw->conf.chandef.chan->band;
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->signal = ah->noise + rx_stats->rs_rssi;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
if (rx_stats->rs_moreaggr)
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
sc->rx.discard_next = false;
return 0;
rxs->signal = ah->noise + rx_stats->rs_rssi;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
struct sk_buff *skb,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs,
bool decrypt_error)
static void ath9k_process_tsf(struct ath_rx_status *rs,
struct ieee80211_rx_status *rxs,
u64 tsf)
{
struct ath_hw *ah = common->ah;
struct ieee80211_hdr *hdr;
int hdrlen, padpos, padsize;
u8 keyix;
__le16 fc;
u32 tsf_lower = tsf & 0xffffffff;
/* see if any padding is done by the hw and remove it */
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
fc = hdr->frame_control;
padpos = ieee80211_hdrlen(fc);
rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
if (rs->rs_tstamp > tsf_lower &&
unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
rxs->mactime -= 0x100000000ULL;
/* The MAC header is padded to a 32-bit boundary if the
* packet payload is non-zero. The general calculation for
* padsize would take into account odd header lengths:
* padsize = (4 - padpos % 4) % 4; However, since only
* even-length headers are used, padding can only be 0 or 2
* bytes and we can optimize this a bit. In addition, we must
* not try to remove padding from short control frames that do
* not have payload. */
padsize = padpos & 3;
if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
keyix = rx_stats->rs_keyix;
if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
keyix = skb->data[hdrlen + 3] >> 6;
if (test_bit(keyix, common->keymap))
rxs->flag |= RX_FLAG_DECRYPTED;
}
if (ah->sw_mgmt_crypto &&
(rxs->flag & RX_FLAG_DECRYPTED) &&
ieee80211_is_mgmt(fc))
/* Use software decrypt for management frames. */
rxs->flag &= ~RX_FLAG_DECRYPTED;
if (rs->rs_tstamp < tsf_lower &&
unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
}
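The mactime logic above splices the 32-bit hardware timestamp into the 64-bit TSF and corrects for a wrap of the low word between frame reception and the TSF read. A self-contained sketch of the same arithmetic (hypothetical helper for illustration):

#include <stdint.h>

static uint64_t extend_rx_tstamp(uint64_t tsf, uint32_t rs_tstamp)
{
	uint32_t tsf_lower = (uint32_t)tsf;
	uint64_t mactime = (tsf & ~0xffffffffULL) | rs_tstamp;

	/* TSF wrapped after the frame arrived: borrow from the high word. */
	if (rs_tstamp > tsf_lower && rs_tstamp - tsf_lower > 0x10000000)
		mactime -= 0x100000000ULL;

	/* Frame arrived after the low word wrapped: carry into it. */
	if (rs_tstamp < tsf_lower && tsf_lower - rs_tstamp > 0x10000000)
		mactime += 0x100000000ULL;

	return mactime;
}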
#ifdef CONFIG_ATH9K_DEBUGFS
@ -1133,6 +1067,234 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
#endif
}
static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
if (ieee80211_is_beacon(hdr->frame_control)) {
RX_STAT_INC(rx_beacons);
if (!is_zero_ether_addr(common->curbssid) &&
ether_addr_equal(hdr->addr3, common->curbssid))
return true;
}
return false;
}
/*
* For Decrypt or Demic errors, we only mark the packet status here and always
* push the frame up to let mac80211 handle the actual error case, be it a
* missing decryption key or a real decryption error. This lets us keep
* statistics there.
*/
static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
struct sk_buff *skb,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rx_status,
bool *decrypt_error, u64 tsf)
{
struct ieee80211_hw *hw = sc->hw;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr;
bool discard_current = sc->rx.discard_next;
int ret = 0;
/*
* Discard corrupt descriptors which are marked in
* ath_get_next_rx_buf().
*/
sc->rx.discard_next = rx_stats->rs_more;
if (discard_current)
return -EINVAL;
/*
* Discard zero-length packets.
*/
if (!rx_stats->rs_datalen) {
RX_STAT_INC(rx_len_err);
return -EINVAL;
}
/*
* rs_status follows rs_datalen, so if rs_datalen is too large
* we can take it as a hint that the hardware corrupted it and
* ignore those frames.
*/
if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
RX_STAT_INC(rx_len_err);
return -EINVAL;
}
/* Only use status info from the last fragment */
if (rx_stats->rs_more)
return 0;
/*
* Return immediately if the RX descriptor has been marked
* as corrupt based on the various error bits.
*
* This is different from the other corrupt descriptor
* condition handled above.
*/
if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
ret = -EINVAL;
goto exit;
}
hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
ath9k_process_tsf(rx_stats, rx_status, tsf);
ath_debug_stat_rx(sc, rx_stats);
/*
* Process PHY errors and return so that the packet
* can be dropped.
*/
if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
RX_STAT_INC(rx_spectral);
ret = -EINVAL;
goto exit;
}
/*
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
ret = -EINVAL;
goto exit;
}
rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
if (rx_stats->is_mybeacon) {
sc->hw_busy_count = 0;
ath_start_rx_poll(sc, 3);
}
if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
ret = -EINVAL;
goto exit;
}
ath9k_process_rssi(common, hw, rx_stats, rx_status);
rx_status->band = hw->conf.chandef.chan->band;
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->antenna = rx_stats->rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
if (ieee80211_is_data_present(hdr->frame_control) &&
!ieee80211_is_qos_nullfunc(hdr->frame_control))
sc->rx.num_pkts++;
#endif
exit:
sc->rx.discard_next = false;
return ret;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
struct sk_buff *skb,
struct ath_rx_status *rx_stats,
struct ieee80211_rx_status *rxs,
bool decrypt_error)
{
struct ath_hw *ah = common->ah;
struct ieee80211_hdr *hdr;
int hdrlen, padpos, padsize;
u8 keyix;
__le16 fc;
/* see if any padding is done by the hw and remove it */
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
fc = hdr->frame_control;
padpos = ieee80211_hdrlen(fc);
/* The MAC header is padded to a 32-bit boundary if the
* packet payload is non-zero. The general calculation for
* padsize would take into account odd header lengths:
* padsize = (4 - padpos % 4) % 4; However, since only
* even-length headers are used, padding can only be 0 or 2
* bytes and we can optimize this a bit. In addition, we must
* not try to remove padding from short control frames that do
* not have payload. */
padsize = padpos & 3;
if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
}
keyix = rx_stats->rs_keyix;
if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
ieee80211_has_protected(fc)) {
rxs->flag |= RX_FLAG_DECRYPTED;
} else if (ieee80211_has_protected(fc)
&& !decrypt_error && skb->len >= hdrlen + 4) {
keyix = skb->data[hdrlen + 3] >> 6;
if (test_bit(keyix, common->keymap))
rxs->flag |= RX_FLAG_DECRYPTED;
}
if (ah->sw_mgmt_crypto &&
(rxs->flag & RX_FLAG_DECRYPTED) &&
ieee80211_is_mgmt(fc))
/* Use software decrypt for management frames. */
rxs->flag &= ~RX_FLAG_DECRYPTED;
}
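The padding comment in this function claims that, because 802.11 header lengths are always even, the shortcut padpos & 3 agrees with the general formula (4 - padpos % 4) % 4. A tiny self-checking sketch of that claim (illustration only):

#include <assert.h>

static void check_padsize_shortcut(void)
{
	/* For the even header lengths that actually occur, both
	 * expressions yield 0 or 2 pad bytes. */
	for (int padpos = 0; padpos <= 36; padpos += 2)
		assert((padpos & 3) == (4 - padpos % 4) % 4);
}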
/*
* Run the LNA combining algorithm only in these cases:
*
* Standalone WLAN cards with both LNA/Antenna diversity
* enabled in the EEPROM.
*
* WLAN+BT cards which are in the supported card list
* in ath_pci_id_table and the user has loaded the
* driver with "bt_ant_diversity" set to true.
*/
static void ath9k_antenna_check(struct ath_softc *sc,
struct ath_rx_status *rs)
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
return;
/*
* All MPDUs in an aggregate will use the same LNA
* as the first MPDU.
*/
if (rs->rs_isaggr && !rs->rs_firstaggr)
return;
/*
* Change the default rx antenna if rx diversity
* chooses the other antenna 3 times in a row.
*/
if (sc->rx.defant != rs->rs_antenna) {
if (++sc->rx.rxotherant >= 3)
ath_setdefantenna(sc, rs->rs_antenna);
} else {
sc->rx.rxotherant = 0;
}
if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
if (common->bt_ant_diversity)
ath_ant_comb_scan(sc, rs);
} else {
ath_ant_comb_scan(sc, rs);
}
}
static void ath9k_apply_ampdu_details(struct ath_softc *sc,
struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
@ -1157,18 +1319,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
struct sk_buff *skb = NULL, *requeue_skb, *hdr_skb;
struct ieee80211_rx_status *rxs;
struct ath_hw *ah = sc->sc_ah;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hw *hw = sc->hw;
struct ieee80211_hdr *hdr;
int retval;
struct ath_rx_status rs;
enum ath9k_rx_qtype qtype;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int dma_type;
u8 rx_status_len = ah->caps.rx_status_len;
u64 tsf = 0;
u32 tsf_lower = 0;
unsigned long flags;
dma_addr_t new_buf_addr;
@ -1180,7 +1338,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
tsf = ath9k_hw_gettsf64(ah);
tsf_lower = tsf & 0xffffffff;
do {
bool decrypt_error = false;
@ -1207,55 +1364,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
else
hdr_skb = skb;
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
if (ieee80211_is_beacon(hdr->frame_control)) {
RX_STAT_INC(rx_beacons);
if (!is_zero_ether_addr(common->curbssid) &&
ether_addr_equal(hdr->addr3, common->curbssid))
rs.is_mybeacon = true;
else
rs.is_mybeacon = false;
}
else
rs.is_mybeacon = false;
if (ieee80211_is_data_present(hdr->frame_control) &&
!ieee80211_is_qos_nullfunc(hdr->frame_control))
sc->rx.num_pkts++;
ath_debug_stat_rx(sc, &rs);
memset(rxs, 0, sizeof(struct ieee80211_rx_status));
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
rxs->mactime -= 0x100000000ULL;
if (rs.rs_tstamp < tsf_lower &&
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
if (rs.rs_phyerr == ATH9K_PHYERR_RADAR)
ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime);
if (rs.rs_status & ATH9K_RXERR_PHY) {
if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
RX_STAT_INC(rx_spectral);
goto requeue_drop_frag;
}
}
retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs,
&decrypt_error);
retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
&decrypt_error, tsf);
if (retval)
goto requeue_drop_frag;
if (rs.is_mybeacon) {
sc->hw_busy_count = 0;
ath_start_rx_poll(sc, 3);
}
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@ -1309,8 +1425,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
sc->rx.frag = skb;
goto requeue;
}
if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC)
goto requeue_drop_frag;
if (sc->rx.frag) {
int space = skb->len - skb_tailroom(hdr_skb);
@ -1340,35 +1454,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
ath_rx_ps(sc, skb, rs.is_mybeacon);
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
/*
* Run the LNA combining algorithm only in these cases:
*
* Standalone WLAN cards with both LNA/Antenna diversity
* enabled in the EEPROM.
*
* WLAN+BT cards which are in the supported card list
* in ath_pci_id_table and the user has loaded the
* driver with "bt_ant_diversity" set to true.
*/
if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
/*
* Change the default rx antenna if rx diversity
* chooses the other antenna 3 times in a row.
*/
if (sc->rx.defant != rs.rs_antenna) {
if (++sc->rx.rxotherant >= 3)
ath_setdefantenna(sc, rs.rs_antenna);
} else {
sc->rx.rxotherant = 0;
}
if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
if (common->bt_ant_diversity)
ath_ant_comb_scan(sc, &rs);
} else {
ath_ant_comb_scan(sc, &rs);
}
}
ath9k_antenna_check(sc, &rs);
ath9k_apply_ampdu_details(sc, &rs, rxs);
@ -1387,7 +1473,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (edma) {
ath_rx_edma_buf_link(sc, qtype);
} else {
ath_rx_buf_link(sc, bf);
ath_rx_buf_relink(sc, bf);
ath9k_hw_rxena(ah);
}
} while (1);

View File

@ -893,9 +893,9 @@
#define AR_SREV_9485(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
#define AR_SREV_9485_11(_ah) \
(AR_SREV_9485(_ah) && \
((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
#define AR_SREV_9485_11_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11))
#define AR_SREV_9485_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485))

View File

@ -312,12 +312,6 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
}
}
/*
* TODO: For frame(s) that are in the retry state, we will reuse the
* sequence number(s) without setting the retry bit. The
* alternative is to give up on these and BAR the receiver's window
* forward.
*/
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
@ -341,14 +335,8 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
}
list_add_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
tid->seq_next = tid->seq_start;
tid->baw_tail = tid->baw_head;
tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
@ -493,7 +481,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
while (bf) {
bf_next = bf->bf_next;
if (!bf->bf_stale || bf_next != NULL)
if (!bf->bf_state.stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
@ -586,7 +574,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* not a holding desc.
*/
INIT_LIST_HEAD(&bf_head);
if (bf_next != NULL || !bf_last->bf_stale)
if (bf_next != NULL || !bf_last->bf_state.stale)
list_move_tail(&bf->list, &bf_head);
if (!txpending) {
@ -610,7 +598,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ieee80211_sta_eosp(sta);
}
/* retry the un-acked ones */
if (bf->bf_next == NULL && bf_last->bf_stale) {
if (bf->bf_next == NULL && bf_last->bf_state.stale) {
struct ath_buf *tbf;
tbf = ath_clone_txbuf(sc, bf_last);
@ -900,6 +888,8 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!fi->bf)
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
else
bf->bf_state.stale = false;
if (!bf) {
__skb_unlink(skb, *q);
@ -1734,7 +1724,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
while (!list_empty(list)) {
bf = list_first_entry(list, struct ath_buf, list);
if (bf->bf_stale) {
if (bf->bf_state.stale) {
list_del(&bf->list);
ath_tx_return_buffer(sc, bf);
@ -2490,7 +2480,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* it with the STALE flag.
*/
bf_held = NULL;
if (bf->bf_stale) {
if (bf->bf_state.stale) {
bf_held = bf;
if (list_is_last(&bf_held->list, &txq->axq_q))
break;
@ -2514,7 +2504,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* however leave the last descriptor back as the holding
* descriptor for hw.
*/
lastbf->bf_stale = true;
lastbf->bf_state.stale = true;
INIT_LIST_HEAD(&bf_head);
if (!list_is_singular(&lastbf->list))
list_cut_position(&bf_head,
@ -2569,6 +2559,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (ts.qid == sc->beacon.beaconq) {
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
ath9k_csa_is_finished(sc);
continue;
}
@ -2585,7 +2577,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
}
bf = list_first_entry(fifo_list, struct ath_buf, list);
if (bf->bf_stale) {
if (bf->bf_state.stale) {
list_del(&bf->list);
ath_tx_return_buffer(sc, bf);
bf = list_first_entry(fifo_list, struct ath_buf, list);
@ -2607,7 +2599,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_txqaddbuf(sc, txq, &bf_q, true);
}
} else {
lastbf->bf_stale = true;
lastbf->bf_state.stale = true;
if (bf != lastbf)
list_cut_position(&bf_head, fifo_list,
lastbf->list.prev);

View File

@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SIGNAL_DBM;
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
if (!modparam_noht) {
/*

View File

@ -416,13 +416,13 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
*/
if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
/* L4 protocol identified, csum calculated */
if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0) {
if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
wil_err(wil, "Incorrect checksum reported\n");
kfree_skb(skb);
return NULL;
}
/* If HW reports a bad checksum, let the IP stack re-check it.
* For example, the HW doesn't understand the Microsoft IP stack,
* which mis-calculates the TCP checksum: where it should be 0x0,
* it writes 0xffff, in violation of RFC 1624.
*/
}
ds_bits = wil_rxdesc_ds_bits(d);

View File

@ -339,7 +339,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
}
} else {
cfg80211_rx_mgmt(wil->wdev, freq, signal,
(void *)rx_mgmt_frame, d_len, GFP_KERNEL);
(void *)rx_mgmt_frame, d_len, 0, GFP_KERNEL);
}
}

View File

@ -4645,6 +4645,19 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN,
B43_MACCTL_PSM_JMP0);
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
bcma_core_pci_down(dev->dev->bdev->bus);
break;
#endif
#ifdef CONFIG_B43_SSB
case B43_BUS_SSB:
/* TODO */
break;
#endif
}
b43_dma_free(dev);
b43_pio_free(dev);
b43_chip_exit(dev);
@ -4684,6 +4697,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
case B43_BUS_BCMA:
bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
dev->dev->bdev, true);
bcma_core_pci_up(dev->dev->bdev->bus);
break;
#endif
#ifdef CONFIG_B43_SSB

View File

@ -592,6 +592,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
struct sk_buff_head pktq;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@ -602,7 +603,10 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
}
memcpy(mypkt->data, buf, nbytes);
err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
__skb_queue_head_init(&pktq);
__skb_queue_tail(&pktq, mypkt);
err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
__skb_dequeue_tail(&pktq);
brcmu_pkt_buf_free_skb(mypkt);
return err;
@ -611,22 +615,18 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt)
uint flags, struct sk_buff_head *pktq)
{
uint width;
int err = 0;
struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
fn, addr, pkt->len);
fn, addr, pktq->qlen);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
brcmf_sdio_addrprep(sdiodev, width, &addr);
skb_queue_head_init(&pkt_list);
skb_queue_tail(&pkt_list, pkt);
err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
skb_dequeue_tail(&pkt_list);
err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
return err;
}

View File

@ -350,7 +350,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
sdiodev->bus_if = bus_if;
bus_if->bus_priv.sdio = sdiodev;
bus_if->align = BRCMF_SDALIGN;
dev_set_drvdata(&func->dev, bus_if);
dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
sdiodev->dev = &sdiodev->func[1]->dev;

View File

@ -194,6 +194,8 @@
#define BRCMF_E_IF_DEL 2
#define BRCMF_E_IF_CHANGE 3
#define BRCMF_E_IF_FLAG_NOIF 1
#define BRCMF_E_IF_ROLE_STA 0
#define BRCMF_E_IF_ROLE_AP 1
#define BRCMF_E_IF_ROLE_WDS 2
@ -209,6 +211,8 @@
#define BRCMF_DCMD_MEDLEN 1536
#define BRCMF_DCMD_MAXLEN 8192
#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
@ -505,6 +509,25 @@ struct brcmf_dcmd {
uint needed; /* bytes needed (optional) */
};
/**
* struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
*
* @pktslots: dynamically allocated array for ordering AMPDU packets.
* @flow_id: AMPDU flow identifier.
* @cur_idx: last AMPDU index from firmware.
* @exp_idx: expected next AMPDU index.
* @max_idx: maximum amount of packets per AMPDU.
* @pend_pkts: number of packets currently in @pktslots.
*/
struct brcmf_ampdu_rx_reorder {
struct sk_buff **pktslots;
u8 flow_id;
u8 cur_idx;
u8 exp_idx;
u8 max_idx;
u8 pend_pkts;
};
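struct brcmf_ampdu_rx_reorder is allocated together with its @pktslots array in a single buffer, with the pointer array placed directly behind the struct (see brcmf_rxreorder_process_info() further below). A reduced sketch of that allocation pattern, assuming kernel context (names are illustrative):

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

struct reorder_sketch {
	struct sk_buff **slots;
	u8 max_idx;
};

static struct reorder_sketch *reorder_sketch_alloc(u8 max_idx)
{
	struct reorder_sketch *st;

	/* one allocation: struct followed by max_idx + 1 slot pointers */
	st = kzalloc(sizeof(*st) + (max_idx + 1) * sizeof(*st->slots),
		     GFP_ATOMIC);
	if (!st)
		return NULL;

	st->slots = (struct sk_buff **)(st + 1);
	st->max_idx = max_idx;
	return st;
}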
/* Forward decls for struct brcmf_pub (see below) */
struct brcmf_proto; /* device communication protocol info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
@ -536,9 +559,10 @@ struct brcmf_pub {
struct brcmf_fweh_info fweh;
bool fw_signals;
struct brcmf_fws_info *fws;
spinlock_t fws_spinlock;
struct brcmf_ampdu_rx_reorder
*reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
@ -604,6 +628,9 @@ struct brcmf_if {
wait_queue_head_t pend_8021x_wait;
};
struct brcmf_skb_reorder_data {
u8 *reorder;
};
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);

View File

@ -36,7 +36,11 @@ struct brcmf_bus_dcmd {
*
* @init: prepare for communication with dongle.
* @stop: clear pending frames, disable data flow.
* @txdata: send a data frame to the dongle (callee disposes skb).
* @txdata: send a data frame to the dongle. When the data
* has been transferred, the common driver must be
* notified using brcmf_txcomplete(). The common
* driver calls this function with interrupts
* disabled.
* @txctl: transmit a control request message to dongle.
* @rxctl: receive a control response message from dongle.
* @gettxq: obtain a reference of bus transmit queue (optional).
@ -65,7 +69,6 @@ struct brcmf_bus_ops {
* @maxctl: maximum size for rxctl request message.
* @tx_realloc: number of tx packets realloced for headroom.
* @dstats: dongle-based statistical data.
* @align: alignment requirement for the bus.
* @dcmd_list: bus/device specific dongle initialization commands.
* @chip: device identifier of the dongle chip.
* @chiprev: revision of the dongle chip.
@ -80,7 +83,6 @@ struct brcmf_bus {
enum brcmf_bus_state state;
uint maxctl;
unsigned long tx_realloc;
u8 align;
u32 chip;
u32 chiprev;
struct list_head dcmd_list;

View File

@ -38,6 +38,19 @@ MODULE_LICENSE("Dual BSD/GPL");
#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
/* AMPDU rx reordering definitions */
#define BRCMF_RXREORDER_FLOWID_OFFSET 0
#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
#define BRCMF_RXREORDER_FLAGS_OFFSET 4
#define BRCMF_RXREORDER_CURIDX_OFFSET 6
#define BRCMF_RXREORDER_EXPIDX_OFFSET 8
#define BRCMF_RXREORDER_DEL_FLOW 0x01
#define BRCMF_RXREORDER_FLUSH_ALL 0x02
#define BRCMF_RXREORDER_CURIDX_VALID 0x04
#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
#define BRCMF_RXREORDER_NEW_HOLE 0x10
/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
@ -265,17 +278,234 @@ void brcmf_txflowblock(struct device *dev, bool state)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
int i;
brcmf_dbg(TRACE, "Enter\n");
if (brcmf_fws_fc_active(drvr->fws)) {
brcmf_fws_bus_blocked(drvr, state);
brcmf_fws_bus_blocked(drvr, state);
}
static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
ifp->stats.multicast++;
/* Process special event packets */
brcmf_fweh_process_skb(ifp->drvr, skb);
if (!(ifp->ndev->flags & IFF_UP)) {
brcmu_pkt_buf_free_skb(skb);
return;
}
ifp->stats.rx_bytes += skb->len;
ifp->stats.rx_packets++;
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
if (in_interrupt())
netif_rx(skb);
else
/* If the receive is not processed inside an ISR,
* the softirqd must be woken explicitly to service
* the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
*/
netif_rx_ni(skb);
}
static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
u8 start, u8 end,
struct sk_buff_head *skb_list)
{
/* initialize return list */
__skb_queue_head_init(skb_list);
if (rfi->pend_pkts == 0) {
brcmf_dbg(INFO, "no packets in reorder queue\n");
return;
}
do {
if (rfi->pktslots[start]) {
__skb_queue_tail(skb_list, rfi->pktslots[start]);
rfi->pktslots[start] = NULL;
}
start++;
if (start > rfi->max_idx)
start = 0;
} while (start != end);
rfi->pend_pkts -= skb_queue_len(skb_list);
}
static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
struct sk_buff *pkt)
{
u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
struct brcmf_ampdu_rx_reorder *rfi;
struct sk_buff_head reorder_list;
struct sk_buff *pnext;
u8 flags;
u32 buf_size;
flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
/* validate flags and flow id */
if (flags == 0xFF) {
brcmf_err("invalid flags...so ignore this packet\n");
brcmf_netif_rx(ifp, pkt);
return;
}
rfi = ifp->drvr->reorder_flows[flow_id];
if (flags & BRCMF_RXREORDER_DEL_FLOW) {
brcmf_dbg(INFO, "flow-%d: delete\n",
flow_id);
if (rfi == NULL) {
brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
flow_id);
brcmf_netif_rx(ifp, pkt);
return;
}
brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
&reorder_list);
/* add the last packet */
__skb_queue_tail(&reorder_list, pkt);
kfree(rfi);
ifp->drvr->reorder_flows[flow_id] = NULL;
goto netif_rx;
}
/* from here on we need a flow reorder instance */
if (rfi == NULL) {
buf_size = sizeof(*rfi);
max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
buf_size += (max_idx + 1) * sizeof(pkt);
/* allocate space for flow reorder info */
brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
flow_id, max_idx);
rfi = kzalloc(buf_size, GFP_ATOMIC);
if (rfi == NULL) {
brcmf_err("failed to alloc buffer\n");
brcmf_netif_rx(ifp, pkt);
return;
}
ifp->drvr->reorder_flows[flow_id] = rfi;
rfi->pktslots = (struct sk_buff **)(rfi+1);
rfi->max_idx = max_idx;
}
if (flags & BRCMF_RXREORDER_NEW_HOLE) {
if (rfi->pend_pkts) {
brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
rfi->exp_idx,
&reorder_list);
WARN_ON(rfi->pend_pkts);
} else {
__skb_queue_head_init(&reorder_list);
}
rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
rfi->pktslots[rfi->cur_idx] = pkt;
rfi->pend_pkts++;
brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
} else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
/* still in the current hole */
/* enqueue the current on the buffer chain */
if (rfi->pktslots[cur_idx] != NULL) {
brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
rfi->pktslots[cur_idx] = NULL;
}
rfi->pktslots[cur_idx] = pkt;
rfi->pend_pkts++;
rfi->cur_idx = cur_idx;
brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
flow_id, cur_idx, exp_idx, rfi->pend_pkts);
/* can return now as there is no reorder
* list to process.
*/
return;
}
if (rfi->exp_idx == cur_idx) {
if (rfi->pktslots[cur_idx] != NULL) {
brcmf_dbg(INFO, "error buffer pending..free it\n");
brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
rfi->pktslots[cur_idx] = NULL;
}
rfi->pktslots[cur_idx] = pkt;
rfi->pend_pkts++;
/* got the expected one. flush from current to expected
* and update expected
*/
brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
flow_id, cur_idx, exp_idx, rfi->pend_pkts);
rfi->cur_idx = cur_idx;
rfi->exp_idx = exp_idx;
brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
&reorder_list);
brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
flow_id, skb_queue_len(&reorder_list),
rfi->pend_pkts);
} else {
u8 end_idx;
brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
flow_id, flags, rfi->cur_idx, rfi->exp_idx,
cur_idx, exp_idx);
if (flags & BRCMF_RXREORDER_FLUSH_ALL)
end_idx = rfi->exp_idx;
else
end_idx = exp_idx;
/* flush pkts first */
brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
&reorder_list);
if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
__skb_queue_tail(&reorder_list, pkt);
} else {
rfi->pktslots[cur_idx] = pkt;
rfi->pend_pkts++;
}
rfi->exp_idx = exp_idx;
rfi->cur_idx = cur_idx;
}
} else {
for (i = 0; i < BRCMF_MAX_IFS; i++)
brcmf_txflowblock_if(drvr->iflist[i],
BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
state);
/* explicit window move updating the expected index */
exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
flow_id, flags, rfi->exp_idx, exp_idx);
if (flags & BRCMF_RXREORDER_FLUSH_ALL)
end_idx = rfi->exp_idx;
else
end_idx = exp_idx;
brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
&reorder_list);
__skb_queue_tail(&reorder_list, pkt);
/* set the new expected idx */
rfi->exp_idx = exp_idx;
}
netif_rx:
skb_queue_walk_safe(&reorder_list, pkt, pnext) {
__skb_unlink(pkt, &reorder_list);
brcmf_netif_rx(ifp, pkt);
}
}
@ -285,16 +515,18 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
struct brcmf_skb_reorder_data *rd;
u8 ifidx;
int ret;
brcmf_dbg(DATA, "Enter\n");
brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
skb_queue_len(skb_list));
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
/* process and remove protocol-specific header */
ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
ifp = drvr->iflist[ifidx];
if (ret || !ifp || !ifp->ndev) {
@ -304,31 +536,11 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
continue;
}
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
ifp->stats.multicast++;
/* Process special event packets */
brcmf_fweh_process_skb(drvr, skb);
if (!(ifp->ndev->flags & IFF_UP)) {
brcmu_pkt_buf_free_skb(skb);
continue;
}
ifp->stats.rx_bytes += skb->len;
ifp->stats.rx_packets++;
if (in_interrupt())
netif_rx(skb);
rd = (struct brcmf_skb_reorder_data *)skb->cb;
if (rd->reorder)
brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
else
/* If the receive is not processed inside an ISR,
* the softirqd must be woken explicitly to service the
* NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
*/
netif_rx_ni(skb);
brcmf_netif_rx(ifp, skb);
}
}
@ -889,7 +1101,6 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
goto fail;
drvr->fw_signals = true;
ret = brcmf_fws_init(drvr);
if (ret < 0)
goto fail;

View File

@ -201,13 +201,6 @@ struct rte_console {
#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
/* HW frame tag */
#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
/* Total length of frame header for dongle protocol */
#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
/*
* Software allocation of To SB Mailbox resources
*/
@ -250,38 +243,6 @@ struct rte_console {
/* Current protocol version */
#define SDPCM_PROT_VERSION 4
/* SW frame header */
#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
#define SDPCM_CHANNEL_MASK 0x00000f00
#define SDPCM_CHANNEL_SHIFT 8
#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
#define SDPCM_NEXTLEN_OFFSET 2
/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
#define SDPCM_DOFFSET_MASK 0xff000000
#define SDPCM_DOFFSET_SHIFT 24
#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
/* logical channel numbers */
#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication Channel Id */
#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
/*
* Shared structure between dongle and the host.
* The structure contains pointers to trap or assert information.
@ -396,8 +357,8 @@ struct sdpcm_shared_le {
__le32 brpt_addr;
};
/* SDIO read frame info */
struct brcmf_sdio_read {
/* dongle SDIO bus specific header info */
struct brcmf_sdio_hdrinfo {
u8 seq_num;
u8 channel;
u16 len;
@ -431,7 +392,7 @@ struct brcmf_sdio {
u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
u8 rx_seq; /* Receive sequence number (expected) */
struct brcmf_sdio_read cur_read;
struct brcmf_sdio_hdrinfo cur_read;
/* info of current read frame */
bool rxskip; /* Skip receive (awaiting NAK ACK) */
bool rxpending; /* Data frame pending in dongle */
@ -500,6 +461,8 @@ struct brcmf_sdio {
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
bool sleeping; /* SDIO bus sleeping */
u8 tx_hdrlen; /* sdio bus header length for tx packet */
};
/* clkstate */
@ -510,7 +473,6 @@ struct brcmf_sdio {
#ifdef DEBUG
static int qcount[NUMPRIO];
static int tx_packets[NUMPRIO];
#endif /* DEBUG */
#define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
@ -1043,18 +1005,63 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
}
}
static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
struct brcmf_sdio_read *rd,
enum brcmf_sdio_frmtype type)
/**
* brcmfmac sdio bus specific header
* This is the lowest-layer header wrapped around the packets transmitted
* between the host and the WiFi dongle; it carries information needed by
* the SDIO core and the firmware
*
* It consists of 2 parts: hw header and software header
* hardware header (frame tag) - 4 bytes
* Byte 0~1: Frame length
* Byte 2~3: Checksum, bit-wise inverse of frame length
* software header - 8 bytes
* Byte 0: Rx/Tx sequence number
* Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
* Byte 2: Length of next data frame, reserved for Tx
* Byte 3: Data offset
* Byte 4: Flow control bits, reserved for Tx
* Byte 5: Maximum sequence number allowed by firmware for Tx (N/A in
*         Tx-direction packets)
* Byte 6~7: Reserved
*/
#define SDPCM_HWHDR_LEN 4
#define SDPCM_SWHDR_LEN 8
#define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
/* software header */
#define SDPCM_SEQ_MASK 0x000000ff
#define SDPCM_SEQ_WRAP 256
#define SDPCM_CHANNEL_MASK 0x00000f00
#define SDPCM_CHANNEL_SHIFT 8
#define SDPCM_CONTROL_CHANNEL 0 /* Control */
#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication */
#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv */
#define SDPCM_GLOM_CHANNEL 3 /* Coalesced packets */
#define SDPCM_TEST_CHANNEL 15 /* Test/debug packets */
#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
#define SDPCM_NEXTLEN_MASK 0x00ff0000
#define SDPCM_NEXTLEN_SHIFT 16
#define SDPCM_DOFFSET_MASK 0xff000000
#define SDPCM_DOFFSET_SHIFT 24
#define SDPCM_FCMASK_MASK 0x000000ff
#define SDPCM_WINDOW_MASK 0x0000ff00
#define SDPCM_WINDOW_SHIFT 8
static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
{
u32 hdrvalue;
hdrvalue = *(u32 *)swheader;
return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
}
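A minimal host-side sketch (not part of this patch; names are illustrative)
of the frame-tag sanity check the layout above implies: a header is
plausible only when the 16-bit check value is the bit-wise inverse of the
16-bit frame length, i.e. (len ^ check) == 0xffff.

#include <stdint.h>
#include <stdbool.h>

/* hdr points at the 4-byte little-endian hw frame tag */
static bool frame_tag_ok(const uint8_t *hdr)
{
	uint16_t len   = (uint16_t)(hdr[0] | (hdr[1] << 8));
	uint16_t check = (uint16_t)(hdr[2] | (hdr[3] << 8));

	return (uint16_t)(len ^ check) == 0xffff;
}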
static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
struct brcmf_sdio_hdrinfo *rd,
enum brcmf_sdio_frmtype type)
{
u16 len, checksum;
u8 rx_seq, fc, tx_seq_max;
u32 swheader;
/*
* 4 bytes hardware header (frame tag)
* Byte 0~1: Frame length
* Byte 2~3: Checksum, bit-wise inverse of frame length
*/
/* hw header */
len = get_unaligned_le16(header);
checksum = get_unaligned_le16(header + sizeof(u16));
/* All zero means no more to read */
@ -1083,24 +1090,16 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
}
rd->len = len;
/*
* 8 bytes hardware header
* Byte 0: Rx sequence number
* Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
* Byte 2: Length of next data frame
* Byte 3: Data offset
* Byte 4: Flow control bits
* Byte 5: Maximum Sequence number allow for Tx
* Byte 6~7: Reserved
*/
if (type == BRCMF_SDIO_FT_SUPER &&
SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
/* software header */
header += SDPCM_HWHDR_LEN;
swheader = le32_to_cpu(*(__le32 *)header);
if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
brcmf_err("Glom descriptor found in superframe head\n");
rd->len = 0;
return -EINVAL;
}
rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
@ -1120,7 +1119,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
rd->len = 0;
return -EINVAL;
}
rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
rd->dat_offset = brcmf_sdio_getdatoffset(header);
if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
brcmf_err("seq %d: bad data offset\n", rx_seq);
bus->sdcnt.rx_badhdr++;
@ -1137,14 +1136,15 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
/* no need to check the rest for a subframe */
if (type == BRCMF_SDIO_FT_SUB)
return 0;
rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
/* only warn for non-glom packets */
if (rd->channel != SDPCM_GLOM_CHANNEL)
brcmf_err("seq %d: next length error\n", rx_seq);
rd->len_nxtfrm = 0;
}
fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
swheader = le32_to_cpu(*(__le32 *)(header + 4));
fc = swheader & SDPCM_FCMASK_MASK;
if (bus->flowcontrol != fc) {
if (~bus->flowcontrol & fc)
bus->sdcnt.fc_xoff++;
@ -1153,7 +1153,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fc;
}
tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
brcmf_err("seq %d: max tx seq number error\n", rx_seq);
tx_seq_max = bus->tx_seq + 2;
@ -1163,18 +1163,40 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
return 0;
}
static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
{
*(__le16 *)header = cpu_to_le16(frm_length);
*(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
}
static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
struct brcmf_sdio_hdrinfo *hd_info)
{
u32 sw_header;
brcmf_sdio_update_hwhdr(header, hd_info->len);
sw_header = bus->tx_seq;
sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
SDPCM_CHANNEL_MASK;
sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
SDPCM_DOFFSET_MASK;
*(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
*(((__le32 *)header) + 2) = 0;
}
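/* Worked example (assumed values, not from this patch): packing a
 * 64-byte frame on the control channel with tx_seq = 5 and a data
 * offset of 12 produces, on the wire:
 *   bytes 0-1:  40 00        (len = 0x0040, LE)
 *   bytes 2-3:  bf ff        (~len = 0xffbf)
 *   bytes 4-7:  05 00 00 0c  ((seq 5) | (channel 0 << 8) | (doffset 12 << 24))
 *   bytes 8-11: 00 00 00 00  (reserved)
 */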
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
u32 align = 0;
u16 sublen;
struct sk_buff *pfirst, *pnext;
int errcode;
u8 doff, sfdoff;
struct brcmf_sdio_read rd_new;
struct brcmf_sdio_hdrinfo rd_new;
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
@ -1182,6 +1204,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
bus->glomd, skb_peek(&bus->glom));
if (bus->sdiodev->pdata)
align = bus->sdiodev->pdata->sd_sgentry_align;
if (align < 4)
align = 4;
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
pfirst = pnext = NULL;
@ -1205,9 +1232,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pnext = NULL;
break;
}
if (sublen % BRCMF_SDALIGN) {
if (sublen % align) {
brcmf_err("sublen %d not multiple of %d\n",
sublen, BRCMF_SDALIGN);
sublen, align);
}
totlen += sublen;
@ -1220,7 +1247,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
/* Allocate/chain packet for next subframe */
pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
pnext = brcmu_pkt_buf_get_skb(sublen + align);
if (pnext == NULL) {
brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
num, sublen);
@ -1229,7 +1256,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_tail(&bus->glom, pnext);
/* Adhere to start alignment requirements */
pkt_align(pnext, sublen, BRCMF_SDALIGN);
pkt_align(pnext, sublen, align);
}
/* If all allocations succeeded, save packet chain
@ -1305,8 +1332,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.seq_num = rxseq;
rd_new.len = dlen;
sdio_claim_host(bus->sdiodev->func[1]);
errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
BRCMF_SDIO_FT_SUPER);
errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
BRCMF_SDIO_FT_SUPER);
sdio_release_host(bus->sdiodev->func[1]);
bus->cur_read.len = rd_new.len_nxtfrm << 4;
@ -1324,8 +1351,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.len = pnext->len;
rd_new.seq_num = rxseq++;
sdio_claim_host(bus->sdiodev->func[1]);
errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
BRCMF_SDIO_FT_SUB);
errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
BRCMF_SDIO_FT_SUB);
sdio_release_host(bus->sdiodev->func[1]);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
pnext->data, 32, "subframe:\n");
@ -1357,7 +1384,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
dptr, pfirst->len,
@ -1535,7 +1562,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
uint rxleft = 0; /* Remaining number of frames allowed */
int ret; /* Return code from calls */
uint rxcount = 0; /* Total frames read */
struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
u8 head_read = 0;
brcmf_dbg(TRACE, "Enter\n");
@ -1583,8 +1610,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxhdr, SDPCM_HDRLEN,
"RxHdr:\n");
if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
BRCMF_SDIO_FT_NORMAL)) {
if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
BRCMF_SDIO_FT_NORMAL)) {
sdio_release_host(bus->sdiodev->func[1]);
if (!bus->rxpending)
break;
@ -1648,8 +1675,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
rd_new.seq_num = rd->seq_num;
sdio_claim_host(bus->sdiodev->func[1]);
if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
}
@ -1693,7 +1720,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
/* Save superframe descriptor and allocate packet frame */
if (rd->channel == SDPCM_GLOM_CHANNEL) {
if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
rd->len);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
@ -1759,85 +1786,168 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
return;
}
/* flag marking a dummy skb added for DMA alignment requirement */
#define DUMMY_SKB_FLAG 0x10000
/* bit mask of data length chopped from the previous packet */
#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
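/* e.g. a dummy skb that absorbed a 5-byte chopped tail stores
 * 0x10005 (flag | chop length) in its cb area
 */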
/**
* brcmf_sdio_txpkt_prep - packet preparation for transmit
* @bus: brcmf_sdio structure pointer
* @pktq: packet list pointer
* @chan: virtual channel to transmit the packet
*
* Processing applied to the packet:
* - Align data buffer pointer
* - Align data buffer length
* - Prepare header
* Return: negative value on error
*/
static int
brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
uint chan)
{
u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
int ntail;
struct sk_buff *pkt_next, *pkt_new;
u8 *dat_buf;
unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
struct brcmf_sdio_hdrinfo hd_info = {0};
/* SDIO ADMA requires at least 32 bit alignment */
head_align = 4;
sg_align = 4;
if (bus->sdiodev->pdata) {
head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
bus->sdiodev->pdata->sd_head_align : 4;
sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
bus->sdiodev->pdata->sd_sgentry_align : 4;
}
/* sg entry alignment should be a divisor of block size */
WARN_ON(blksize % sg_align);
pkt_next = pktq->next;
dat_buf = (u8 *)(pkt_next->data);
/* Check head padding */
head_pad = ((unsigned long)dat_buf % head_align);
if (head_pad) {
if (skb_headroom(pkt_next) < head_pad) {
bus->sdiodev->bus_if->tx_realloc++;
head_pad = 0;
if (skb_cow(pkt_next, head_pad))
return -ENOMEM;
}
skb_push(pkt_next, head_pad);
dat_buf = (u8 *)(pkt_next->data);
memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
}
/* Check tail padding */
pkt_new = NULL;
tail_chop = pkt_next->len % sg_align;
tail_pad = sg_align - tail_chop;
tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
if (pkt_new == NULL)
return -ENOMEM;
memcpy(pkt_new->data,
pkt_next->data + pkt_next->len - tail_chop,
tail_chop);
*(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
skb_trim(pkt_next, pkt_next->len - tail_chop);
__skb_queue_after(pktq, pkt_next, pkt_new);
} else {
ntail = pkt_next->data_len + tail_pad -
(pkt_next->end - pkt_next->tail);
if (skb_cloned(pkt_next) || ntail > 0)
if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
return -ENOMEM;
if (skb_linearize(pkt_next))
return -ENOMEM;
dat_buf = (u8 *)(pkt_next->data);
__skb_put(pkt_next, tail_pad);
}
/* Now prep the header */
if (pkt_new)
hd_info.len = pkt_next->len + tail_chop;
else
hd_info.len = pkt_next->len - tail_pad;
hd_info.channel = chan;
hd_info.dat_offset = head_pad + bus->tx_hdrlen;
brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
if (BRCMF_BYTES_ON() &&
((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
(BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
else if (BRCMF_HDRS_ON())
brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
"Tx Header:\n");
return 0;
}
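The tail-padding arithmetic above, pulled out as a stand-alone sketch
(a hypothetical helper mirroring the code, not part of the driver):
pad bytes are added so the frame ends on both the sg-entry alignment
and the SDIO block size.

static unsigned int tail_pad_needed(unsigned int len,
				    unsigned int sg_align,
				    unsigned int blksize)
{
	unsigned int pad = sg_align - (len % sg_align);

	pad += blksize - (len + pad) % blksize;
	return pad;	/* e.g. len 1540, align 4, blk 512 -> 508 */
}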
/**
* brcmf_sdio_txpkt_postp - packet post processing for transmit
* @bus: brcmf_sdio structure pointer
* @pktq: packet list pointer
*
* Processing applied to the packet:
* - Remove head padding
* - Remove tail padding
*/
static void
brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
{
u8 *hdr;
u32 dat_offset;
u32 dummy_flags, chop_len;
struct sk_buff *pkt_next, *tmp, *pkt_prev;
skb_queue_walk_safe(pktq, pkt_next, tmp) {
dummy_flags = *(u32 *)(pkt_next->cb);
if (dummy_flags & DUMMY_SKB_FLAG) {
chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
if (chop_len) {
pkt_prev = pkt_next->prev;
memcpy(pkt_prev->data + pkt_prev->len,
pkt_next->data, chop_len);
skb_put(pkt_prev, chop_len);
}
__skb_unlink(pkt_next, pktq);
brcmu_pkt_buf_free_skb(pkt_next);
} else {
hdr = pkt_next->data + SDPCM_HWHDR_LEN;
dat_offset = le32_to_cpu(*(__le32 *)hdr);
dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
SDPCM_DOFFSET_SHIFT;
skb_pull(pkt_next, dat_offset);
}
}
}
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
uint chan)
{
int ret;
u8 *frame;
u16 len, pad = 0;
u32 swheader;
int i;
struct sk_buff_head localq;
brcmf_dbg(TRACE, "Enter\n");
frame = (u8 *) (pkt->data);
/* Add alignment padding, allocate new packet if needed */
pad = ((unsigned long)frame % BRCMF_SDALIGN);
if (pad) {
if (skb_headroom(pkt) < pad) {
brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
skb_headroom(pkt), pad);
bus->sdiodev->bus_if->tx_realloc++;
ret = skb_cow(pkt, BRCMF_SDALIGN);
if (ret)
goto done;
pad = ((unsigned long)frame % BRCMF_SDALIGN);
}
skb_push(pkt, pad);
frame = (u8 *) (pkt->data);
memset(frame, 0, pad + SDPCM_HDRLEN);
}
/* precondition: pad < BRCMF_SDALIGN */
/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
len = (u16) (pkt->len);
*(__le16 *) frame = cpu_to_le16(len);
*(((__le16 *) frame) + 1) = cpu_to_le16(~len);
/* Software tag: channel, sequence number, data offset */
swheader =
((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
(((pad +
SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
*(((__le32 *) frame) + 1) = cpu_to_le32(swheader);
*(((__le32 *) frame) + 2) = 0;
#ifdef DEBUG
tx_packets[pkt->priority]++;
#endif
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
(BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
frame, len, "Tx Frame:\n");
brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
((BRCMF_CTL_ON() &&
chan == SDPCM_CONTROL_CHANNEL) ||
(BRCMF_DATA_ON() &&
chan != SDPCM_CONTROL_CHANNEL))) &&
BRCMF_HDRS_ON(),
frame, min_t(u16, len, 16), "TxHdr:\n");
/* Raise len to next SDIO block to eliminate tail command */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
u16 pad = bus->blocksize - (len % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize))
len += pad;
} else if (len % BRCMF_SDALIGN) {
len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
}
/* Some controllers have trouble with odd bytes -- round to even */
if (len & (ALIGNMENT - 1))
len = roundup(len, ALIGNMENT);
__skb_queue_head_init(&localq);
__skb_queue_tail(&localq, pkt);
ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
if (ret)
goto done;
sdio_claim_host(bus->sdiodev->func[1]);
ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
SDIO_FUNC_2, F2SYNC, &localq);
bus->sdcnt.f2txdata++;
if (ret < 0) {
@ -1865,11 +1975,11 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
}
sdio_release_host(bus->sdiodev->func[1]);
if (ret == 0)
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
done:
/* restore pkt buffer pointer before calling tx complete routine */
skb_pull(pkt, SDPCM_HDRLEN + pad);
brcmf_sdio_txpkt_postp(bus, &localq);
__skb_dequeue_tail(&localq);
brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
return ret;
}
@ -1880,7 +1990,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
u32 intstatus = 0;
int ret = 0, prec_out;
uint cnt = 0;
uint datalen;
u8 tx_prec_map;
brcmf_dbg(TRACE, "Enter\n");
@ -1896,7 +2005,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
break;
}
spin_unlock_bh(&bus->txqlock);
datalen = pkt->len - SDPCM_HDRLEN;
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
@ -2221,7 +2329,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
}
} else {
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
}
sdio_release_host(bus->sdiodev->func[1]);
bus->ctrl_frame_stat = false;
@ -2276,13 +2384,14 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
ulong flags;
brcmf_dbg(TRACE, "Enter\n");
datalen = pkt->len;
/* Add space for the header */
skb_push(pkt, SDPCM_HDRLEN);
skb_push(pkt, bus->tx_hdrlen);
/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
prec = prio2prec((pkt->priority & PRIOMASK));
@ -2293,10 +2402,9 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->sdcnt.fcqueued++;
/* Priority based enq */
spin_lock_bh(&bus->txqlock);
spin_lock_irqsave(&bus->txqlock, flags);
if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
skb_pull(pkt, SDPCM_HDRLEN);
brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
skb_pull(pkt, bus->tx_hdrlen);
brcmf_err("out of bus->txq !!!\n");
ret = -ENOSR;
} else {
@ -2307,7 +2415,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->txoff = true;
brcmf_txflowblock(bus->sdiodev->dev, true);
}
spin_unlock_bh(&bus->txqlock);
spin_unlock_irqrestore(&bus->txqlock, flags);
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@ -2436,7 +2544,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
return ret;
}
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
return ret;
}
@ -2446,19 +2554,19 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
u32 swheader;
uint retries = 0;
u8 doff = 0;
int ret = -1;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
struct brcmf_sdio_hdrinfo hd_info = {0};
brcmf_dbg(TRACE, "Enter\n");
/* Back up the pointer to make room for the bus header */
frame = msg - SDPCM_HDRLEN;
len = (msglen += SDPCM_HDRLEN);
frame = msg - bus->tx_hdrlen;
len = (msglen += bus->tx_hdrlen);
/* Add alignment padding (optional for ctl frames) */
doff = ((unsigned long)frame % BRCMF_SDALIGN);
@ -2466,10 +2574,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
frame -= doff;
len += doff;
msglen += doff;
memset(frame, 0, doff + SDPCM_HDRLEN);
memset(frame, 0, doff + bus->tx_hdrlen);
}
/* precondition: doff < BRCMF_SDALIGN */
doff += SDPCM_HDRLEN;
doff += bus->tx_hdrlen;
/* Round send length to next SDIO block */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
@ -2491,18 +2599,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
brcmf_sdbrcm_bus_sleep(bus, false, false);
sdio_release_host(bus->sdiodev->func[1]);
/* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
*(__le16 *) frame = cpu_to_le16((u16) msglen);
*(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
/* Software tag: channel, sequence number, data offset */
swheader =
((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
SDPCM_CHANNEL_MASK)
| bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
SDPCM_DOFFSET_MASK);
put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
hd_info.len = (u16)msglen;
hd_info.channel = SDPCM_CONTROL_CHANNEL;
hd_info.dat_offset = doff;
brcmf_sdio_hdpack(bus, frame, &hd_info);
if (!data_ok(bus)) {
brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
@ -3733,7 +3833,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
struct brcmf_sdio *bus;
struct brcmf_bus_dcmd *dlst;
u32 dngl_txglom;
u32 dngl_txglomalign;
u32 txglomalign = 0;
u8 idx;
brcmf_dbg(TRACE, "Enter\n");
@ -3752,7 +3852,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->txbound = BRCMF_TXBOUND;
bus->rxbound = BRCMF_RXBOUND;
bus->txminmax = BRCMF_TXMINMAX;
bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
bus->tx_seq = SDPCM_SEQ_WRAP - 1;
INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
@ -3794,8 +3894,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->sdiodev->bus_if->chip = bus->ci->chip;
bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
/* Attach to the brcmf/OS/network interface */
ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
/* default sdio bus header length for tx packet */
bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
/* Attach to the common layer, reserve hdr space */
ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
if (ret != 0) {
brcmf_err("brcmf_attach failed\n");
goto fail;
@ -3827,9 +3930,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
dlst->param_len = sizeof(u32);
} else {
/* otherwise, set txglomalign */
dngl_txglomalign = bus->sdiodev->bus_if->align;
if (sdiodev->pdata)
txglomalign = sdiodev->pdata->sd_sgentry_align;
/* SDIO ADMA requires at least 32 bit alignment */
if (txglomalign < 4)
txglomalign = 4;
dlst->name = "bus:txglomalign";
dlst->param = (char *)&dngl_txglomalign;
dlst->param = (char *)&txglomalign;
dlst->param_len = sizeof(u32);
}
list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);

View File

@ -185,6 +185,10 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
ifevent->action, ifevent->ifidx, ifevent->bssidx,
ifevent->flags, ifevent->role);
if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
brcmf_dbg(EVENT, "event can be ignored\n");
return;
}
if (ifevent->ifidx >= BRCMF_MAX_IFS) {
brcmf_err("invalid interface index: %u\n",
ifevent->ifidx);

View File

@ -69,4 +69,25 @@ struct brcmf_fil_bss_enable_le {
__le32 enable;
};
/**
* struct brcmf_tdls_iovar_le - common structure for TDLS iovars.
*
* @ea: ether address of peer station.
* @mode: mode value depending on specific tdls iovar.
* @chanspec: channel specification.
* @pad: unused (for future use).
*/
struct brcmf_tdls_iovar_le {
u8 ea[ETH_ALEN]; /* Station address */
u8 mode; /* mode: depends on iovar */
__le16 chanspec;
__le32 pad; /* future */
};
enum brcmf_tdls_manual_ep_ops {
BRCMF_TDLS_MANUAL_EP_CREATE = 1,
BRCMF_TDLS_MANUAL_EP_DELETE = 3,
BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
};
#endif /* FWIL_TYPES_H_ */

View File

@ -422,9 +422,12 @@ struct brcmf_fws_macdesc_table {
struct brcmf_fws_info {
struct brcmf_pub *drvr;
spinlock_t spinlock;
ulong flags;
struct brcmf_fws_stats stats;
struct brcmf_fws_hanger hanger;
enum brcmf_fws_fcmode fcmode;
bool fw_signals;
bool bcmc_credit_check;
struct brcmf_fws_macdesc_table desc;
struct workqueue_struct *fws_wq;
@ -483,6 +486,18 @@ static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
}
#undef BRCMF_FWS_TLV_DEF
static void brcmf_fws_lock(struct brcmf_fws_info *fws)
__acquires(&fws->spinlock)
{
spin_lock_irqsave(&fws->spinlock, fws->flags);
}
static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
__releases(&fws->spinlock)
{
spin_unlock_irqrestore(&fws->spinlock, fws->flags);
}
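/* Note: keeping the irqsave flags in struct brcmf_fws_info, instead of
 * threading a ulong through every caller as the old macros below did,
 * works because the flags are written only after the lock is taken and
 * read before it is released, so concurrent users cannot clobber them.
 */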
static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
{
u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
@ -869,8 +884,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
skcb->state = BRCMF_FWS_SKBSTATE_TIM;
bus = fws->drvr->bus_if;
err = brcmf_fws_hdrpush(fws, skb);
if (err == 0)
if (err == 0) {
brcmf_fws_unlock(fws);
err = brcmf_bus_txdata(bus, skb);
brcmf_fws_lock(fws);
}
if (err)
brcmu_pkt_buf_free_skb(skb);
return true;
@ -905,26 +923,10 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
return 0;
}
/* using macro so sparse checking does not complain
* about locking imbalance.
*/
#define brcmf_fws_lock(drvr, flags) \
do { \
flags = 0; \
spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
} while (0)
/* using macro so sparse checking does not complain
* about locking imbalance.
*/
#define brcmf_fws_unlock(drvr, flags) \
spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
static
int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry, *existing;
ulong flags;
u8 mac_handle;
u8 ifidx;
u8 *addr;
@ -938,10 +940,10 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
if (entry->occupied) {
brcmf_dbg(TRACE, "deleting %s mac %pM\n",
entry->name, addr);
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
brcmf_fws_macdesc_cleanup(fws, entry, -1);
brcmf_fws_macdesc_deinit(entry);
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
} else
fws->stats.mac_update_failed++;
return 0;
@ -950,13 +952,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
existing = brcmf_fws_macdesc_lookup(fws, addr);
if (IS_ERR(existing)) {
if (!entry->occupied) {
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
entry->mac_handle = mac_handle;
brcmf_fws_macdesc_init(entry, addr, ifidx);
brcmf_fws_macdesc_set_name(fws, entry);
brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
} else {
fws->stats.mac_update_failed++;
@ -964,13 +966,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
} else {
if (entry != existing) {
brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
memcpy(entry, existing,
offsetof(struct brcmf_fws_mac_descriptor, psq));
entry->mac_handle = mac_handle;
brcmf_fws_macdesc_deinit(existing);
brcmf_fws_macdesc_set_name(fws, entry);
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
addr);
} else {
@ -986,7 +988,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
ulong flags;
u8 mac_handle;
int ret;
@ -996,7 +997,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
fws->stats.mac_ps_update_failed++;
return -ESRCH;
}
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
/* a state update should wipe old credits */
entry->requested_credit = 0;
entry->requested_packet = 0;
@ -1011,7 +1012,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
}
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
return ret;
}
@ -1019,7 +1020,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
ulong flags;
u8 ifidx;
int ret;
@ -1038,7 +1038,7 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
entry->name);
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
switch (type) {
case BRCMF_FWS_TYPE_INTERFACE_OPEN:
entry->state = BRCMF_FWS_STATE_OPEN;
@ -1050,10 +1050,10 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
break;
default:
ret = -EINVAL;
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
goto fail;
}
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
return ret;
fail:
@ -1065,7 +1065,6 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
ulong flags;
entry = &fws->desc.nodes[data[1] & 0x1F];
if (!entry->occupied) {
@ -1079,14 +1078,14 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
brcmf_fws_get_tlv_name(type), type, entry->name,
data[0], data[2]);
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
entry->requested_credit = data[0];
else
entry->requested_packet = data[0];
entry->ac_bitmap = data[2];
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
@ -1160,7 +1159,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
{
/* schedule dequeue when delayed traffic has credits, or when flow
 * control is not active but delayed packets remain
 */
if (fws->fifo_credit_map & fws->fifo_delay_map)
if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
(!brcmf_fws_fc_active(fws) && fws->fifo_delay_map))
queue_work(fws->fws_wq, &fws->fws_dequeue_work);
}
@ -1383,7 +1383,6 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
u8 *data)
{
ulong flags;
int i;
if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
@ -1392,19 +1391,18 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
}
brcmf_dbg(DATA, "enter: data %pM\n", data);
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
brcmf_fws_return_credits(fws, i, data[i]);
brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
fws->fifo_delay_map);
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
ulong lflags;
__le32 status_le;
u32 status;
u32 hslot;
@ -1418,9 +1416,9 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
hslot = brcmf_txstatus_get_field(status, HSLOT);
genbit = brcmf_txstatus_get_field(status, GENERATION);
brcmf_fws_lock(fws->drvr, lflags);
brcmf_fws_lock(fws);
brcmf_fws_txs_process(fws, flags, hslot, genbit);
brcmf_fws_unlock(fws->drvr, lflags);
brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
@ -1440,7 +1438,6 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
int i;
ulong flags;
u8 *credits = data;
if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
@ -1453,7 +1450,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
fws->creditmap_received = true;
brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
brcmf_fws_lock(ifp->drvr, flags);
brcmf_fws_lock(fws);
for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
if (*credits)
fws->fifo_credit_map |= 1 << i;
@ -1462,7 +1459,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
fws->fifo_credit[i] = *credits++;
}
brcmf_fws_schedule_deq(fws);
brcmf_fws_unlock(ifp->drvr, flags);
brcmf_fws_unlock(fws);
return 0;
}
@ -1471,18 +1468,18 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
void *data)
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
ulong flags;
brcmf_fws_lock(ifp->drvr, flags);
brcmf_fws_lock(fws);
if (fws)
fws->bcmc_credit_check = true;
brcmf_fws_unlock(ifp->drvr, flags);
brcmf_fws_unlock(fws);
return 0;
}
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
struct sk_buff *skb)
{
struct brcmf_skb_reorder_data *rd;
struct brcmf_fws_info *fws = drvr->fws;
u8 *signal_data;
s16 data_len;
@ -1497,8 +1494,10 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
WARN_ON(signal_len > skb->len);
if (!signal_len)
return 0;
/* if flow control disabled, skip to packet data and leave */
if (!signal_len || !drvr->fw_signals) {
if (!fws->fw_signals) {
skb_pull(skb, signal_len);
return 0;
}
@ -1536,9 +1535,12 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
err = BRCMF_FWS_RET_OK_NOSCHEDULE;
switch (type) {
case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
case BRCMF_FWS_TYPE_COMP_TXSTATUS:
break;
case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
rd = (struct brcmf_skb_reorder_data *)skb->cb;
rd->reorder = data;
break;
case BRCMF_FWS_TYPE_MACDESC_ADD:
case BRCMF_FWS_TYPE_MACDESC_DEL:
brcmf_fws_macdesc_indicate(fws, type, data);
@ -1694,17 +1696,22 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
return PTR_ERR(entry);
brcmf_fws_precommit_skb(fws, fifo, skb);
entry->transit_count++;
if (entry->suppressed)
entry->suppr_transit_count++;
brcmf_fws_unlock(fws);
rc = brcmf_bus_txdata(bus, skb);
brcmf_fws_lock(fws);
brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
skcb->if_flags, skcb->htod, rc);
if (rc < 0) {
entry->transit_count--;
if (entry->suppressed)
entry->suppr_transit_count--;
brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
goto rollback;
}
entry->transit_count++;
if (entry->suppressed)
entry->suppr_transit_count++;
fws->stats.pkt2bus++;
fws->stats.send_pkts[fifo]++;
if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
@ -1741,11 +1748,11 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
struct brcmf_fws_info *fws = drvr->fws;
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct ethhdr *eh = (struct ethhdr *)(skb->data);
ulong flags;
int fifo = BRCMF_FWS_FIFO_BCMC;
bool multicast = is_multicast_ether_addr(eh->h_dest);
bool pae = eh->h_proto == htons(ETH_P_PAE);
brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
if (!skb->priority)
skb->priority = cfg80211_classify8021d(skb);
@ -1754,14 +1761,6 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
if (pae)
atomic_inc(&ifp->pend_8021x_cnt);
if (!brcmf_fws_fc_active(fws)) {
/* If the protocol uses a data header, apply it */
brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);
/* Use bus module to send data frame */
return brcmf_bus_txdata(drvr->bus_if, skb);
}
/* set control buffer information */
skcb->if_flags = 0;
skcb->state = BRCMF_FWS_SKBSTATE_NEW;
@ -1769,7 +1768,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
if (!multicast)
fifo = brcmf_fws_prio2fifo[skb->priority];
brcmf_fws_lock(drvr, flags);
brcmf_fws_lock(fws);
if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
fws->borrow_defer_timestamp = jiffies +
BRCMF_FWS_BORROW_DEFER_PERIOD;
@ -1789,7 +1788,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
}
brcmu_pkt_buf_free_skb(skb);
}
brcmf_fws_unlock(drvr, flags);
brcmf_fws_unlock(fws);
return 0;
}
@ -1809,7 +1808,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_mac_descriptor *entry;
if (!ifp->ndev || !ifp->drvr->fw_signals)
if (!ifp->ndev)
return;
entry = &fws->desc.iface[ifp->ifidx];
@ -1824,31 +1823,54 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
void brcmf_fws_del_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
ulong flags;
if (!entry)
return;
brcmf_fws_lock(ifp->drvr, flags);
brcmf_fws_lock(ifp->drvr->fws);
ifp->fws_desc = NULL;
brcmf_dbg(TRACE, "deleting %s\n", entry->name);
brcmf_fws_macdesc_deinit(entry);
brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
brcmf_fws_unlock(ifp->drvr, flags);
brcmf_fws_unlock(ifp->drvr->fws);
}
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
{
struct brcmf_fws_info *fws;
struct brcmf_pub *drvr;
struct sk_buff *skb;
ulong flags;
int fifo;
u32 hslot;
u32 ifidx;
int ret;
fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
drvr = fws->drvr;
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
fifo--) {
if (!brcmf_fws_fc_active(fws)) {
while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) {
hslot = brcmf_skb_htod_tag_get_field(skb,
HSLOT);
brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
&skb, true);
ifidx = brcmf_skb_if_flags_get_field(skb,
INDEX);
brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
/* Use bus module to send data frame */
brcmf_fws_unlock(fws);
ret = brcmf_bus_txdata(drvr->bus_if, skb);
brcmf_fws_lock(fws);
if (ret < 0)
brcmf_txfinalize(drvr, skb, false);
if (fws->bus_flow_blocked)
break;
}
continue;
}
while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
(fifo == BRCMF_FWS_FIFO_BCMC))) {
skb = brcmf_fws_deq(fws, fifo);
@ -1876,42 +1898,43 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
}
}
}
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
}
int brcmf_fws_init(struct brcmf_pub *drvr)
{
struct brcmf_fws_info *fws;
u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
int rc;
if (!drvr->fw_signals)
return 0;
spin_lock_init(&drvr->fws_spinlock);
drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
if (!drvr->fws) {
rc = -ENOMEM;
goto fail;
}
/* set linkage back */
drvr->fws->drvr = drvr;
drvr->fws->fcmode = fcmode;
fws = drvr->fws;
drvr->fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
if (drvr->fws->fws_wq == NULL) {
spin_lock_init(&fws->spinlock);
/* set linkage back */
fws->drvr = drvr;
fws->fcmode = fcmode;
fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
if (fws->fws_wq == NULL) {
brcmf_err("workqueue creation failed\n");
rc = -EBADF;
goto fail;
}
INIT_WORK(&drvr->fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
/* enable firmware signalling if fcmode active */
if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE)
if (fws->fcmode != BRCMF_FWS_FCMODE_NONE)
tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE;
rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
brcmf_fws_notify_credit_map);
@ -1927,31 +1950,33 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
goto fail;
}
/* setting the iovar may fail if feature is unsupported
/* Setting the iovar may fail if feature is unsupported
* so leave the rc as is so driver initialization can
* continue.
* continue. Set mode back to none indicating not enabled.
*/
fws->fw_signals = true;
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) {
brcmf_err("failed to set bdcv2 tlv signaling\n");
goto fail_event;
fws->fcmode = BRCMF_FWS_FCMODE_NONE;
fws->fw_signals = false;
}
brcmf_fws_hanger_init(&drvr->fws->hanger);
brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0);
brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other);
brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
brcmf_fws_hanger_init(&fws->hanger);
brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
/* create debugfs file for statistics */
brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
brcmf_debugfs_create_fws_stats(drvr, &fws->stats);
brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
drvr->fw_signals ? "enabled" : "disabled", tlv);
fws->fw_signals ? "enabled" : "disabled", tlv);
return 0;
fail_event:
brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT);
brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
fail:
brcmf_fws_deinit(drvr);
return rc;
@ -1960,24 +1985,18 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
void brcmf_fws_deinit(struct brcmf_pub *drvr)
{
struct brcmf_fws_info *fws = drvr->fws;
ulong flags;
if (!fws)
return;
/* disable firmware signalling entirely
* to avoid using the workqueue.
*/
drvr->fw_signals = false;
if (drvr->fws->fws_wq)
destroy_workqueue(drvr->fws->fws_wq);
/* cleanup */
brcmf_fws_lock(drvr, flags);
brcmf_fws_lock(fws);
brcmf_fws_cleanup(fws, -1);
drvr->fws = NULL;
brcmf_fws_unlock(drvr, flags);
brcmf_fws_unlock(fws);
/* free top structure */
kfree(fws);
@ -1985,7 +2004,7 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr)
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
{
if (!fws)
if (!fws->creditmap_received)
return false;
return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
@ -1993,17 +2012,16 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
ulong flags;
u32 hslot;
if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
brcmu_pkt_buf_free_skb(skb);
return;
}
brcmf_fws_lock(fws->drvr, flags);
brcmf_fws_lock(fws);
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
brcmf_fws_unlock(fws->drvr, flags);
brcmf_fws_unlock(fws);
}
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)

View File

@ -1430,7 +1430,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
IEEE80211_BAND_5GHZ);
wdev = &ifp->vif->wdev;
cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0,
GFP_ATOMIC);
kfree(mgmt_frame);
@ -1895,7 +1895,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
IEEE80211_BAND_2GHZ :
IEEE80211_BAND_5GHZ);
cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len,
cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0,
GFP_ATOMIC);
brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",

View File

@ -208,7 +208,7 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
*/
extern int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, struct sk_buff *pkt);
uint flags, struct sk_buff_head *pktq);
extern int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes);

View File

@ -614,7 +614,6 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
return 0;
fail:
brcmf_txcomplete(dev, skb, false);
return ret;
}

View File

@ -3155,7 +3155,9 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
}
#ifdef CONFIG_NL80211_TESTMODE
static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
static int brcmf_cfg80211_testmode(struct wiphy *wiphy,
struct wireless_dev *wdev,
void *data, int len)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
@ -4126,6 +4128,53 @@ static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy,
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
}
static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
{
int ret;
switch (oper) {
case NL80211_TDLS_DISCOVERY_REQ:
ret = BRCMF_TDLS_MANUAL_EP_DISCOVERY;
break;
case NL80211_TDLS_SETUP:
ret = BRCMF_TDLS_MANUAL_EP_CREATE;
break;
case NL80211_TDLS_TEARDOWN:
ret = BRCMF_TDLS_MANUAL_EP_DELETE;
break;
default:
brcmf_err("unsupported operation: %d\n", oper);
ret = -EOPNOTSUPP;
}
return ret;
}
static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
struct net_device *ndev, u8 *peer,
enum nl80211_tdls_operation oper)
{
struct brcmf_if *ifp;
struct brcmf_tdls_iovar_le info;
int ret = 0;
ret = brcmf_convert_nl80211_tdls_oper(oper);
if (ret < 0)
return ret;
ifp = netdev_priv(ndev);
memset(&info, 0, sizeof(info));
info.mode = (u8)ret;
if (peer)
memcpy(info.ea, peer, ETH_ALEN);
ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint",
&info, sizeof(info));
if (ret < 0)
brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret);
return ret;
}
static struct cfg80211_ops wl_cfg80211_ops = {
.add_virtual_intf = brcmf_cfg80211_add_iface,
.del_virtual_intf = brcmf_cfg80211_del_iface,
@ -4164,6 +4213,7 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.stop_p2p_device = brcmf_p2p_stop_device,
.crit_proto_start = brcmf_cfg80211_crit_proto_start,
.crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
.tdls_oper = brcmf_cfg80211_tdls_oper,
CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
};
@ -4285,7 +4335,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_OFFCHAN_TX |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_SUPPORTS_TDLS;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
@ -4906,6 +4957,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto cfg80211_p2p_attach_out;
}
err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
if (err) {
brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
}
err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION,
&io_type);
if (err) {

View File

@ -679,27 +679,6 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
return mode == BCMA_CLKMODE_FAST;
}
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true);
}
/* Unconfigure and/or apply various WARs when going down */
void ai_pci_down(struct si_pub *sih)
{
struct si_info *sii;
sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{

View File

@ -183,9 +183,6 @@ extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
extern bool ai_deviceremoved(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
extern void ai_pci_up(struct si_pub *sih);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);

View File

@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
/*
* post receive buffers
* return false is refill failed completely and ring is empty this will stall
* the rx dma and user might want to call rxfill again asap. This unlikely
* happens on memory-rich NIC, but often on memory-constrained dongle
* Return false if refill failed completely or dma mapping failed. The ring
* is empty, which will stall the rx dma and user might want to call rxfill
* again asap. This is unlikely to happen on a memory-rich NIC, but often on
* memory-constrained dongle.
*/
bool dma_rxfill(struct dma_pub *pub)
{
@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
DMA_FROM_DEVICE);
if (dma_mapping_error(di->dmadev, pa))
return false;
/* save the free packet pointer */
di->rxp[rxout] = p;
@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
/* get physical address of buffer start */
pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
/* if mapping failed, free skb */
if (dma_mapping_error(di->dmadev, pa)) {
brcmu_pkt_buf_free_skb(p);
return;
}
/* With a DMA segment list, Descriptor table is filled
* using the segment list instead of looping over
* buffers in multi-chain DMA. Therefore, EOF for SGLIST

View File

@ -4652,7 +4652,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
wlc->band->phyrev = wlc_hw->band->phyrev;
wlc->band->radioid = wlc_hw->band->radioid;
wlc->band->radiorev = wlc_hw->band->radiorev;
brcms_dbg_info(core, "wl%d: phy %u/%u radio %x/%u\n", unit,
wlc->band->phytype, wlc->band->phyrev,
wlc->band->radioid, wlc->band->radiorev);
/* default contention windows size limits */
wlc_hw->band->CWmin = APHY_CWMIN;
wlc_hw->band->CWmax = PHY_CWMAX;
@ -4667,7 +4669,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
brcms_c_coredisable(wlc_hw);
/* Match driver "down" state */
ai_pci_down(wlc_hw->sih);
bcma_core_pci_down(wlc_hw->d11core->bus);
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
@ -5010,12 +5012,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
*/
if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
/* put SB PCI in down state again */
ai_pci_down(wlc_hw->sih);
bcma_core_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF);
return -ENOMEDIUM;
}
ai_pci_up(wlc_hw->sih);
bcma_core_pci_up(wlc_hw->d11core->bus);
/* reset the d11 core */
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@ -5212,7 +5214,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
/* turn off primary xtal and pll */
if (!wlc_hw->noreset) {
ai_pci_down(wlc_hw->sih);
bcma_core_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF);
}
}

View File

@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
gain0_15 = ((biq1 & 0xf) << 12) |
((tia & 0xf) << 8) |
((lna2 & 0x3) << 6) |
((lna2 &
0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
((lna2 & 0x3) << 4) |
((lna1 & 0x3) << 2) |
((lna1 & 0x3) << 0);
mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@ -1328,6 +1329,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
u16 tia_gain, u16 lna2_gain)
{
u32 i_thresh_l, q_thresh_l;
u32 i_thresh_h, q_thresh_h;
struct lcnphy_iq_est iq_est_h, iq_est_l;
wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
lna2_gain, 0);
wlc_lcnphy_rx_gain_override_enable(pi, true);
wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
udelay(500);
write_radio_reg(pi, RADIO_2064_REG112, 0);
if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
return false;
wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
udelay(500);
write_radio_reg(pi, RADIO_2064_REG112, 0);
if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
return false;
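/* The second tone is driven at twice the amplitude of the first, so
 * its measured power should ideally be 4x. Accept this gain
 * combination only if the ratio lands between 2x and 5x on both I
 * and Q; the thresholds below are pwr << 1 (2x) and (pwr << 2) + pwr
 * (5x).
 */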
i_thresh_l = (iq_est_l.i_pwr << 1);
i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
q_thresh_l = (iq_est_l.q_pwr << 1);
q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
if ((iq_est_h.i_pwr > i_thresh_l) &&
(iq_est_h.i_pwr < i_thresh_h) &&
(iq_est_h.q_pwr > q_thresh_l) &&
(iq_est_h.q_pwr < q_thresh_h))
return true;
return false;
}
static bool
wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
const struct lcnphy_rx_iqcomp *iqcomp,
@ -1342,8 +1380,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
int tia_gain;
u32 received_power, rx_pwr_threshold;
int tia_gain, lna2_gain, biq1_gain;
bool set_gain;
u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
u16 values_to_save[11];
s16 *ptr;
@ -1368,127 +1406,126 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
goto cal_done;
}
if (module == 1) {
WARN_ON(module != 1);
tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
for (i = 0; i < 11; i++)
values_to_save[i] =
read_radio_reg(pi, rxiq_cal_rf_reg[i]);
Core1TxControl_old = read_phy_reg(pi, 0x631);
for (i = 0; i < 11; i++)
values_to_save[i] =
read_radio_reg(pi, rxiq_cal_rf_reg[i]);
Core1TxControl_old = read_phy_reg(pi, 0x631);
or_phy_reg(pi, 0x631, 0x0015);
or_phy_reg(pi, 0x631, 0x0015);
RFOverride0_old = read_phy_reg(pi, 0x44c);
RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
rfoverride2_old = read_phy_reg(pi, 0x4b0);
rfoverride2val_old = read_phy_reg(pi, 0x4b1);
rfoverride3_old = read_phy_reg(pi, 0x4f9);
rfoverride3val_old = read_phy_reg(pi, 0x4fa);
rfoverride4_old = read_phy_reg(pi, 0x938);
rfoverride4val_old = read_phy_reg(pi, 0x939);
afectrlovr_old = read_phy_reg(pi, 0x43b);
afectrlovrval_old = read_phy_reg(pi, 0x43c);
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
RFOverride0_old = read_phy_reg(pi, 0x44c);
RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
rfoverride2_old = read_phy_reg(pi, 0x4b0);
rfoverride2val_old = read_phy_reg(pi, 0x4b1);
rfoverride3_old = read_phy_reg(pi, 0x4f9);
rfoverride3val_old = read_phy_reg(pi, 0x4fa);
rfoverride4_old = read_phy_reg(pi, 0x938);
rfoverride4val_old = read_phy_reg(pi, 0x939);
afectrlovr_old = read_phy_reg(pi, 0x43b);
afectrlovrval_old = read_phy_reg(pi, 0x43c);
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
if (tx_gain_override_old) {
wlc_lcnphy_get_tx_gain(pi, &old_gains);
tx_gain_index_old = pi_lcn->lcnphy_current_index;
}
wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
write_radio_reg(pi, RADIO_2064_REG116, 0x06);
write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
write_radio_reg(pi, RADIO_2064_REG098, 0x03);
write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
write_radio_reg(pi, RADIO_2064_REG114, 0x01);
write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
write_phy_reg(pi, 0x6da, 0xffff);
or_phy_reg(pi, 0x6db, 0x3);
wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
wlc_lcnphy_rx_gain_override_enable(pi, true);
tia_gain = 8;
rx_pwr_threshold = 950;
while (tia_gain > 0) {
tia_gain -= 1;
wlc_lcnphy_set_rx_gain_by_distribution(pi,
0, 0, 2, 2,
(u16)
tia_gain, 1, 0);
udelay(500);
received_power =
wlc_lcnphy_measure_digital_power(pi, 2000);
if (received_power < rx_pwr_threshold)
break;
}
result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
wlc_lcnphy_stop_tx_tone(pi);
write_phy_reg(pi, 0x631, Core1TxControl_old);
write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
write_phy_reg(pi, 0x4b0, rfoverride2_old);
write_phy_reg(pi, 0x4b1, rfoverride2val_old);
write_phy_reg(pi, 0x4f9, rfoverride3_old);
write_phy_reg(pi, 0x4fa, rfoverride3val_old);
write_phy_reg(pi, 0x938, rfoverride4_old);
write_phy_reg(pi, 0x939, rfoverride4val_old);
write_phy_reg(pi, 0x43b, afectrlovr_old);
write_phy_reg(pi, 0x43c, afectrlovrval_old);
write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
wlc_lcnphy_clear_trsw_override(pi);
mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
for (i = 0; i < 11; i++)
write_radio_reg(pi, rxiq_cal_rf_reg[i],
values_to_save[i]);
if (tx_gain_override_old)
wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
else
wlc_lcnphy_disable_tx_gain_override(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
wlc_lcnphy_rx_gain_override_enable(pi, false);
tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
if (tx_gain_override_old) {
wlc_lcnphy_get_tx_gain(pi, &old_gains);
tx_gain_index_old = pi_lcn->lcnphy_current_index;
}
wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
write_radio_reg(pi, RADIO_2064_REG116, 0x06);
write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
write_radio_reg(pi, RADIO_2064_REG098, 0x03);
write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
write_radio_reg(pi, RADIO_2064_REG114, 0x01);
write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
write_phy_reg(pi, 0x6da, 0xffff);
or_phy_reg(pi, 0x6db, 0x3);
wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
for (lna2_gain = 3; lna2_gain >= 0; lna2_gain--) {
for (tia_gain = 4; tia_gain >= 0; tia_gain--) {
for (biq1_gain = 6; biq1_gain >= 0; biq1_gain--) {
set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
(u16)
biq1_gain,
(u16)
tia_gain,
(u16)
lna2_gain);
if (!set_gain)
continue;
result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
goto stop_tone;
}
}
}
stop_tone:
wlc_lcnphy_stop_tx_tone(pi);
write_phy_reg(pi, 0x631, Core1TxControl_old);
write_phy_reg(pi, 0x44c, RFOverride0_old);
write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
write_phy_reg(pi, 0x4b0, rfoverride2_old);
write_phy_reg(pi, 0x4b1, rfoverride2val_old);
write_phy_reg(pi, 0x4f9, rfoverride3_old);
write_phy_reg(pi, 0x4fa, rfoverride3val_old);
write_phy_reg(pi, 0x938, rfoverride4_old);
write_phy_reg(pi, 0x939, rfoverride4val_old);
write_phy_reg(pi, 0x43b, afectrlovr_old);
write_phy_reg(pi, 0x43c, afectrlovrval_old);
write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
wlc_lcnphy_clear_trsw_override(pi);
mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
for (i = 0; i < 11; i++)
write_radio_reg(pi, rxiq_cal_rf_reg[i],
values_to_save[i]);
if (tx_gain_override_old)
wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
else
wlc_lcnphy_disable_tx_gain_override(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
wlc_lcnphy_rx_gain_override_enable(pi, false);
cal_done:
kfree(ptr);
return result;
@ -1789,6 +1826,19 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
write_radio_reg(pi, RADIO_2064_REG038, 3);
write_radio_reg(pi, RADIO_2064_REG091, 7);
}
if (!(pi->sh->boardflags & BFL_FEM)) {
static const u8 reg038[14] = {
0xd, 0xe, 0xd, 0xd, 0xd, 0xc, 0xa,
0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0
};
write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
write_radio_reg(pi, RADIO_2064_REG091, 0x3);
write_radio_reg(pi, RADIO_2064_REG038, 0x3);
write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
}
}
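For readers tracking the internal-PA (no BFL_FEM) path added above: the new block programs RADIO_2064_REG038 from a 14-entry table indexed by the 2.4 GHz channel number. A minimal sketch of that lookup, assuming channel is always in 1..14 here (the helper name is invented for illustration; the caller's validation is not shown in this hunk):

static u8 lcnphy_ipa_reg038(u8 channel)
{
	static const u8 reg038[14] = {
		0xd, 0xe, 0xd, 0xd, 0xd, 0xc, 0xa,
		0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0
	};

	return reg038[channel - 1];	/* 2.4 GHz channels are 1-based */
}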
static int
@ -1983,6 +2033,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
} else {
mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
}
} else {
mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@ -2069,13 +2129,23 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
(auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
}
static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
struct phytbl_info tab;
u32 rfseq, ind;
enum lcnphy_tssi_mode mode;
u8 tssi_sel;
if (pi->sh->boardflags & BFL_FEM) {
tssi_sel = 0x1;
mode = LCNPHY_TSSI_EXT;
} else {
tssi_sel = 0xe;
mode = LCNPHY_TSSI_POST_PA;
}
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_ptr = &ind;
@ -2096,7 +2166,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
wlc_lcnphy_set_tssi_mux(pi, mode);
mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@ -2132,9 +2202,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
} else {
mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
}
@ -2181,6 +2252,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
wlc_lcnphy_pwrctrl_rssiparams(pi);
}
@ -2799,6 +2874,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
read_radio_reg(pi, RADIO_2064_REG007) & 1;
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
idleTssi = read_phy_reg(pi, 0x4ab);
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
@ -2816,6 +2893,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
wlc_lcnphy_tssi_setup(pi);
mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
wlc_lcnphy_set_bbmult(pi, 0x0);
wlc_phy_do_dummy_tx(pi, true, OFF);
idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
>> 0);
@ -2837,6 +2920,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
wlc_lcnphy_set_tx_gain(pi, &old_gains);
wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@ -3050,6 +3134,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
wlc_lcnphy_write_table(pi, &tab);
tab.tbl_offset++;
}
mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
@ -3851,7 +3940,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
target_gains.pad_gain = 21;
target_gains.dac_gain = 0;
wlc_lcnphy_set_tx_gain(pi, &target_gains);
wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
@ -3862,6 +3950,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
lcnphy_recal ? LCNPHY_CAL_RECAL :
LCNPHY_CAL_FULL), false);
} else {
wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
@ -4283,20 +4372,20 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
u16 pa_gain;
u16 gm_gain;
if (CHSPEC_IS5G(pi->radio_chanspec))
pa_gain = 0x70;
else
pa_gain = 0x70;
if (pi->sh->boardflags & BFL_FEM)
pa_gain = 0x10;
else
pa_gain = 0x60;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_len = 1;
tab.tbl_ptr = &val;
/* fixed gm_gain value for iPA */
gm_gain = 15;
for (j = 0; j < 128; j++) {
gm_gain = gain_table[j].gm;
if (pi->sh->boardflags & BFL_FEM)
gm_gain = gain_table[j].gm;
val = (((u32) pa_gain << 24) |
(gain_table[j].pad << 16) |
(gain_table[j].pga << 8) | gm_gain);
@ -4507,7 +4596,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
write_phy_reg(pi, 0x4ea, 0x4688);
mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
if (pi->sh->boardflags & BFL_FEM)
mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
else
mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
@ -4518,6 +4610,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
wlc_lcnphy_rcal(pi);
wlc_lcnphy_rc_cal(pi);
if (!(pi->sh->boardflags & BFL_FEM)) {
write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
write_radio_reg(pi, RADIO_2064_REG033, 0x19);
write_radio_reg(pi, RADIO_2064_REG039, 0xe);
}
}
static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@ -4530,6 +4629,7 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
uint idx;
u8 phybw40;
struct phytbl_info tab;
const struct phytbl_info *tb;
u32 val;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
@ -4547,22 +4647,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tab);
}
tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
tab.tbl_width = 16;
tab.tbl_ptr = &val;
tab.tbl_len = 1;
if (!(pi->sh->boardflags & BFL_FEM)) {
tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
tab.tbl_width = 16;
tab.tbl_ptr = &val;
tab.tbl_len = 1;
val = 114;
tab.tbl_offset = 0;
wlc_lcnphy_write_table(pi, &tab);
val = 150;
tab.tbl_offset = 0;
wlc_lcnphy_write_table(pi, &tab);
val = 130;
tab.tbl_offset = 1;
wlc_lcnphy_write_table(pi, &tab);
val = 6;
tab.tbl_offset = 8;
wlc_lcnphy_write_table(pi, &tab);
val = 220;
tab.tbl_offset = 1;
wlc_lcnphy_write_table(pi, &tab);
}
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->sh->boardflags & BFL_FEM)
@ -4576,7 +4674,6 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
}
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
const struct phytbl_info *tb;
int l;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
@ -4597,21 +4694,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tb[idx]);
}
if ((pi->sh->boardflags & BFL_FEM)
&& !(pi->sh->boardflags & BFL_FEM_BT))
wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_epa);
else if (pi->sh->boardflags & BFL_FEM_BT) {
if (pi->sh->boardrev < 0x1250)
wlc_lcnphy_write_table(
pi,
&dot11lcn_sw_ctrl_tbl_info_4313_bt_epa);
if (pi->sh->boardflags & BFL_FEM) {
if (pi->sh->boardflags & BFL_FEM_BT) {
if (pi->sh->boardrev < 0x1250)
tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
else
tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250;
} else {
tb = &dot11lcn_sw_ctrl_tbl_info_4313_epa;
}
} else {
if (pi->sh->boardflags & BFL_FEM_BT)
tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
else
wlc_lcnphy_write_table(
pi,
&dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250);
} else
wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313);
tb = &dot11lcn_sw_ctrl_tbl_info_4313;
}
wlc_lcnphy_write_table(pi, tb);
wlc_lcnphy_load_rfpower(pi);
wlc_lcnphy_clear_papd_comptable(pi);
@ -4955,6 +5053,8 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
wlc_lcnphy_tssi_setup(pi);
}
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@ -4993,8 +5093,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
return false;
if ((pi->sh->boardflags & BFL_FEM) &&
(LCNREV_IS(pi->pubpi.phy_rev, 1))) {
if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;


@ -1507,117 +1507,103 @@ static const u32 dot11lcn_gain_tbl_5G[] = {
const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
{&dot11lcn_gain_tbl_rev0,
sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18,
0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_rev0,
sizeof(dot11lcn_gain_idx_tbl_rev0) /
sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
};
static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
{&dot11lcn_gain_tbl_rev1,
sizeof(dot11lcn_gain_tbl_rev1) / sizeof(dot11lcn_gain_tbl_rev1[0]), 18,
ARRAY_SIZE(dot11lcn_gain_tbl_rev1), 18,
0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_rev0,
sizeof(dot11lcn_gain_idx_tbl_rev0) /
sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
{&dot11lcn_gain_tbl_2G,
sizeof(dot11lcn_gain_tbl_2G) / sizeof(dot11lcn_gain_tbl_2G[0]), 18, 0,
ARRAY_SIZE(dot11lcn_gain_tbl_2G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_2G,
sizeof(dot11lcn_aux_gain_idx_tbl_2G) /
sizeof(dot11lcn_aux_gain_idx_tbl_2G[0]), 14, 0, 16}
ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_2G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_2G,
sizeof(dot11lcn_gain_idx_tbl_2G) / sizeof(dot11lcn_gain_idx_tbl_2G[0]),
ARRAY_SIZE(dot11lcn_gain_idx_tbl_2G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_2G,
sizeof(dot11lcn_gain_val_tbl_2G) / sizeof(dot11lcn_gain_val_tbl_2G[0]),
ARRAY_SIZE(dot11lcn_gain_val_tbl_2G),
17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
{&dot11lcn_gain_tbl_5G,
sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_5G,
sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_5G,
sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_5G,
sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
ARRAY_SIZE(dot11lcn_gain_val_tbl_5G),
17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
{&dot11lcn_gain_tbl_extlna_2G,
sizeof(dot11lcn_gain_tbl_extlna_2G) /
sizeof(dot11lcn_gain_tbl_extlna_2G[0]), 18, 0, 32}
ARRAY_SIZE(dot11lcn_gain_tbl_extlna_2G), 18, 0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_extlna_2G,
sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G) /
sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G[0]), 14, 0, 16}
ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_extlna_2G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_extlna_2G,
sizeof(dot11lcn_gain_idx_tbl_extlna_2G) /
sizeof(dot11lcn_gain_idx_tbl_extlna_2G[0]), 13, 0, 32}
ARRAY_SIZE(dot11lcn_gain_idx_tbl_extlna_2G), 13, 0, 32}
,
{&dot11lcn_gain_val_tbl_extlna_2G,
sizeof(dot11lcn_gain_val_tbl_extlna_2G) /
sizeof(dot11lcn_gain_val_tbl_extlna_2G[0]), 17, 0, 8}
ARRAY_SIZE(dot11lcn_gain_val_tbl_extlna_2G), 17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
{&dot11lcn_gain_tbl_5G,
sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_5G,
sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_5G,
sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_5G,
sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
ARRAY_SIZE(dot11lcn_gain_val_tbl_5G),
17, 0, 8}
};
const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 =
sizeof(dot11lcnphytbl_rx_gain_info_rev0) /
sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]);
ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_rev0);
const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz =
sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) /
sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]);
ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_2G_rev2);
const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz =
sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) /
sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]);
ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_5G_rev2);
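The conversions above (and throughout this file) replace the open-coded sizeof division with the kernel's ARRAY_SIZE() helper. For reference, a simplified sketch of the macro; the real kernel definition also adds a compile-time check that the argument is a true array, omitted here:

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))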
static const u16 dot11lcn_min_sig_sq_tbl_rev0[] = {
0x014d,
@ -2058,6 +2044,73 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
0x0005,
};
static const u16 dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo[] = {
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
};
static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
0x0004,
0x0004,
@ -2771,89 +2824,79 @@ static const u32 dot11lcn_papd_compdelta_tbl_rev0[] = {
const struct phytbl_info dot11lcnphytbl_info_rev0[] = {
{&dot11lcn_min_sig_sq_tbl_rev0,
sizeof(dot11lcn_min_sig_sq_tbl_rev0) /
sizeof(dot11lcn_min_sig_sq_tbl_rev0[0]), 2, 0, 16}
ARRAY_SIZE(dot11lcn_min_sig_sq_tbl_rev0), 2, 0, 16}
,
{&dot11lcn_noise_scale_tbl_rev0,
sizeof(dot11lcn_noise_scale_tbl_rev0) /
sizeof(dot11lcn_noise_scale_tbl_rev0[0]), 1, 0, 16}
ARRAY_SIZE(dot11lcn_noise_scale_tbl_rev0), 1, 0, 16}
,
{&dot11lcn_fltr_ctrl_tbl_rev0,
sizeof(dot11lcn_fltr_ctrl_tbl_rev0) /
sizeof(dot11lcn_fltr_ctrl_tbl_rev0[0]), 11, 0, 32}
ARRAY_SIZE(dot11lcn_fltr_ctrl_tbl_rev0), 11, 0, 32}
,
{&dot11lcn_ps_ctrl_tbl_rev0,
sizeof(dot11lcn_ps_ctrl_tbl_rev0) /
sizeof(dot11lcn_ps_ctrl_tbl_rev0[0]), 12, 0, 32}
ARRAY_SIZE(dot11lcn_ps_ctrl_tbl_rev0), 12, 0, 32}
,
{&dot11lcn_gain_idx_tbl_rev0,
sizeof(dot11lcn_gain_idx_tbl_rev0) /
sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_sw_ctrl_tbl_rev0,
sizeof(dot11lcn_sw_ctrl_tbl_rev0) /
sizeof(dot11lcn_sw_ctrl_tbl_rev0[0]), 15, 0, 16}
ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_rev0), 15, 0, 16}
,
{&dot11lcn_nf_table_rev0,
sizeof(dot11lcn_nf_table_rev0) / sizeof(dot11lcn_nf_table_rev0[0]), 16,
ARRAY_SIZE(dot11lcn_nf_table_rev0), 16,
0, 8}
,
{&dot11lcn_gain_val_tbl_rev0,
sizeof(dot11lcn_gain_val_tbl_rev0) /
sizeof(dot11lcn_gain_val_tbl_rev0[0]), 17, 0, 8}
ARRAY_SIZE(dot11lcn_gain_val_tbl_rev0), 17, 0, 8}
,
{&dot11lcn_gain_tbl_rev0,
sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18,
0, 32}
,
{&dot11lcn_spur_tbl_rev0,
sizeof(dot11lcn_spur_tbl_rev0) / sizeof(dot11lcn_spur_tbl_rev0[0]), 20,
ARRAY_SIZE(dot11lcn_spur_tbl_rev0), 20,
0, 8}
,
{&dot11lcn_unsup_mcs_tbl_rev0,
sizeof(dot11lcn_unsup_mcs_tbl_rev0) /
sizeof(dot11lcn_unsup_mcs_tbl_rev0[0]), 23, 0, 16}
ARRAY_SIZE(dot11lcn_unsup_mcs_tbl_rev0), 23, 0, 16}
,
{&dot11lcn_iq_local_tbl_rev0,
sizeof(dot11lcn_iq_local_tbl_rev0) /
sizeof(dot11lcn_iq_local_tbl_rev0[0]), 0, 0, 16}
ARRAY_SIZE(dot11lcn_iq_local_tbl_rev0), 0, 0, 16}
,
{&dot11lcn_papd_compdelta_tbl_rev0,
sizeof(dot11lcn_papd_compdelta_tbl_rev0) /
sizeof(dot11lcn_papd_compdelta_tbl_rev0[0]), 24, 0, 32}
ARRAY_SIZE(dot11lcn_papd_compdelta_tbl_rev0), 24, 0, 32}
,
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = {
&dot11lcn_sw_ctrl_tbl_4313_rev0,
sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0) /
sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0[0]), 15, 0, 16
ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_rev0), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa = {
&dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo,
ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = {
&dot11lcn_sw_ctrl_tbl_4313_epa_rev0,
sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0) /
sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0[0]), 15, 0, 16
ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = {
&dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo,
sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo) /
sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[0]), 15, 0, 16
ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = {
&dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0,
sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0) /
sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[0]), 15, 0, 16
ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0), 15, 0, 16
};
const u32 dot11lcnphytbl_info_sz_rev0 =
sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]);
ARRAY_SIZE(dot11lcnphytbl_info_rev0);
const struct lcnphy_tx_gain_tbl_entry
dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
@ -2988,134 +3031,134 @@ dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
};
const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
{7, 0, 31, 0, 72},
{7, 0, 31, 0, 70},
{7, 0, 31, 0, 68},
{7, 0, 30, 0, 67},
{7, 0, 29, 0, 68},
{7, 0, 28, 0, 68},
{7, 0, 27, 0, 69},
{7, 0, 26, 0, 70},
{7, 0, 25, 0, 70},
{7, 0, 24, 0, 71},
{7, 0, 23, 0, 72},
{7, 0, 23, 0, 70},
{7, 0, 22, 0, 71},
{7, 0, 21, 0, 72},
{7, 0, 21, 0, 70},
{7, 0, 21, 0, 68},
{7, 0, 21, 0, 66},
{7, 0, 21, 0, 64},
{7, 0, 21, 0, 63},
{7, 0, 20, 0, 64},
{7, 0, 19, 0, 65},
{7, 0, 19, 0, 64},
{7, 0, 18, 0, 65},
{7, 0, 18, 0, 64},
{7, 0, 17, 0, 65},
{7, 0, 17, 0, 64},
{7, 0, 16, 0, 65},
{7, 0, 16, 0, 64},
{7, 0, 16, 0, 62},
{7, 0, 16, 0, 60},
{7, 0, 16, 0, 58},
{7, 0, 15, 0, 61},
{7, 0, 15, 0, 59},
{7, 0, 14, 0, 61},
{7, 0, 14, 0, 60},
{7, 0, 14, 0, 58},
{7, 0, 13, 0, 60},
{7, 0, 13, 0, 59},
{7, 0, 12, 0, 62},
{7, 0, 12, 0, 60},
{7, 0, 12, 0, 58},
{7, 0, 11, 0, 62},
{7, 0, 11, 0, 60},
{7, 0, 11, 0, 59},
{7, 0, 11, 0, 57},
{7, 0, 10, 0, 61},
{7, 0, 10, 0, 59},
{7, 0, 10, 0, 57},
{7, 0, 9, 0, 62},
{7, 0, 9, 0, 60},
{7, 0, 9, 0, 58},
{7, 0, 9, 0, 57},
{7, 0, 8, 0, 62},
{7, 0, 8, 0, 60},
{7, 0, 8, 0, 58},
{7, 0, 8, 0, 57},
{7, 0, 8, 0, 55},
{7, 0, 7, 0, 61},
{15, 0, 31, 0, 72},
{15, 0, 31, 0, 70},
{15, 0, 31, 0, 68},
{15, 0, 30, 0, 68},
{15, 0, 29, 0, 69},
{15, 0, 28, 0, 69},
{15, 0, 27, 0, 70},
{15, 0, 26, 0, 70},
{15, 0, 25, 0, 71},
{15, 0, 24, 0, 72},
{15, 0, 23, 0, 73},
{15, 0, 23, 0, 71},
{15, 0, 22, 0, 72},
{15, 0, 21, 0, 73},
{15, 0, 21, 0, 71},
{15, 0, 21, 0, 69},
{15, 0, 21, 0, 67},
{15, 0, 21, 0, 65},
{15, 0, 21, 0, 63},
{15, 0, 20, 0, 65},
{15, 0, 19, 0, 66},
{15, 0, 19, 0, 64},
{15, 0, 18, 0, 66},
{15, 0, 18, 0, 64},
{15, 0, 17, 0, 66},
{15, 0, 17, 0, 64},
{15, 0, 16, 0, 66},
{15, 0, 16, 0, 64},
{15, 0, 16, 0, 62},
{15, 0, 16, 0, 61},
{15, 0, 16, 0, 59},
{15, 0, 15, 0, 61},
{15, 0, 15, 0, 59},
{15, 0, 14, 0, 62},
{15, 0, 14, 0, 60},
{15, 0, 14, 0, 58},
{15, 0, 13, 0, 61},
{15, 0, 13, 0, 59},
{15, 0, 12, 0, 62},
{15, 0, 12, 0, 61},
{15, 0, 12, 0, 59},
{15, 0, 11, 0, 62},
{15, 0, 11, 0, 61},
{15, 0, 11, 0, 59},
{15, 0, 11, 0, 57},
{15, 0, 10, 0, 61},
{15, 0, 10, 0, 59},
{15, 0, 10, 0, 58},
{15, 0, 9, 0, 62},
{15, 0, 9, 0, 61},
{15, 0, 9, 0, 59},
{15, 0, 9, 0, 57},
{15, 0, 8, 0, 62},
{15, 0, 8, 0, 61},
{15, 0, 8, 0, 59},
{15, 0, 8, 0, 57},
{15, 0, 8, 0, 56},
{15, 0, 8, 0, 54},
{15, 0, 8, 0, 53},
{15, 0, 8, 0, 51},
{15, 0, 8, 0, 50},
{7, 0, 7, 0, 69},
{7, 0, 7, 0, 67},
{7, 0, 7, 0, 65},
{7, 0, 7, 0, 64},
{7, 0, 7, 0, 62},
{7, 0, 7, 0, 60},
{7, 0, 7, 0, 58},
{7, 0, 7, 0, 56},
{7, 0, 7, 0, 57},
{7, 0, 7, 0, 55},
{7, 0, 6, 0, 62},
{7, 0, 6, 0, 60},
{7, 0, 6, 0, 58},
{7, 0, 6, 0, 61},
{7, 0, 6, 0, 59},
{7, 0, 6, 0, 57},
{7, 0, 6, 0, 55},
{7, 0, 6, 0, 56},
{7, 0, 6, 0, 54},
{7, 0, 6, 0, 52},
{7, 0, 6, 0, 53},
{7, 0, 5, 0, 61},
{7, 0, 5, 0, 59},
{7, 0, 5, 0, 57},
{7, 0, 5, 0, 60},
{7, 0, 5, 0, 58},
{7, 0, 5, 0, 56},
{7, 0, 5, 0, 54},
{7, 0, 5, 0, 55},
{7, 0, 5, 0, 53},
{7, 0, 5, 0, 51},
{7, 0, 4, 0, 62},
{7, 0, 4, 0, 60},
{7, 0, 4, 0, 58},
{7, 0, 5, 0, 52},
{7, 0, 5, 0, 50},
{7, 0, 5, 0, 49},
{7, 0, 5, 0, 47},
{7, 0, 4, 0, 57},
{7, 0, 4, 0, 55},
{7, 0, 4, 0, 56},
{7, 0, 4, 0, 54},
{7, 0, 4, 0, 52},
{7, 0, 4, 0, 53},
{7, 0, 4, 0, 51},
{7, 0, 4, 0, 49},
{7, 0, 4, 0, 50},
{7, 0, 4, 0, 48},
{7, 0, 4, 0, 47},
{7, 0, 4, 0, 46},
{7, 0, 3, 0, 60},
{7, 0, 3, 0, 58},
{7, 0, 3, 0, 57},
{7, 0, 3, 0, 55},
{7, 0, 3, 0, 54},
{7, 0, 3, 0, 52},
{7, 0, 4, 0, 44},
{7, 0, 4, 0, 43},
{7, 0, 4, 0, 42},
{7, 0, 4, 0, 41},
{7, 0, 4, 0, 40},
{7, 0, 3, 0, 51},
{7, 0, 3, 0, 49},
{7, 0, 3, 0, 50},
{7, 0, 3, 0, 48},
{7, 0, 3, 0, 47},
{7, 0, 3, 0, 46},
{7, 0, 3, 0, 45},
{7, 0, 3, 0, 44},
{7, 0, 3, 0, 43},
{7, 0, 3, 0, 42},
{7, 0, 3, 0, 41},
{7, 0, 2, 0, 61},
{7, 0, 2, 0, 59},
{7, 0, 2, 0, 57},
{7, 0, 2, 0, 56},
{7, 0, 2, 0, 54},
{7, 0, 2, 0, 53},
{7, 0, 2, 0, 51},
{7, 0, 2, 0, 50},
{7, 0, 2, 0, 48},
{7, 0, 2, 0, 47},
{7, 0, 2, 0, 46},
{7, 0, 2, 0, 44},
{7, 0, 2, 0, 43},
{7, 0, 2, 0, 42},
{7, 0, 2, 0, 41},
{7, 0, 2, 0, 39},
{7, 0, 2, 0, 38},
{7, 0, 2, 0, 37},
{7, 0, 2, 0, 36},
{7, 0, 2, 0, 35},
{7, 0, 2, 0, 34},
{7, 0, 2, 0, 33},
{7, 0, 2, 0, 32},
{7, 0, 1, 0, 63},
{7, 0, 1, 0, 61},
{7, 0, 1, 0, 59},
{7, 0, 1, 0, 57},
{3, 0, 3, 0, 56},
{3, 0, 3, 0, 54},
{3, 0, 3, 0, 53},
{3, 0, 3, 0, 51},
{3, 0, 3, 0, 50},
{3, 0, 3, 0, 48},
{3, 0, 3, 0, 47},
{3, 0, 3, 0, 46},
{3, 0, 3, 0, 44},
{3, 0, 3, 0, 43},
{3, 0, 3, 0, 42},
{3, 0, 3, 0, 41},
{3, 0, 3, 0, 39},
{3, 0, 3, 0, 38},
{3, 0, 3, 0, 37},
{3, 0, 3, 0, 36},
{3, 0, 3, 0, 35},
{3, 0, 3, 0, 34},
};
const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = {


@ -20,6 +20,7 @@
extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[];
extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev0;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa_combo;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;


@ -465,8 +465,8 @@ static int cw1200_bh(void *arg)
(rx || tx || term || suspend || priv->bh_error);
}), status);
pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n",
rx, tx, term, suspend, status);
pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
rx, tx, term, suspend, priv->bh_error, status);
/* Did an error occur? */
if ((status < 0 && status != -ERESTARTSYS) ||


@ -507,7 +507,7 @@ u32 cw1200_dpll_from_clk(u16 clk_khz)
case 0xCB20: /* 52000 KHz */
return 0x07627091;
default:
pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n",
pr_err("Unknown Refclk freq (0x%04x), using 26000KHz\n",
clk_khz);
return 0x0EC4F121;
}


@ -4470,9 +4470,9 @@ il4965_irq_tasklet(struct il_priv *il)
set_bit(S_RFKILL, &il->status);
} else {
clear_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
il_force_reset(il, true);
}
wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
handled |= CSR_INT_BIT_RF_KILL;
}


@ -22,6 +22,8 @@ config IWLWIFI
Intel Wireless WiFi Link 6150BGN 2 Adapter
Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
Intel 2000 Series Wi-Fi Adapters
Intel 7260 Wi-Fi Adapter
Intel 3160 Wi-Fi Adapter
This driver uses the kernel's mac80211 subsystem.
@ -46,17 +48,16 @@ config IWLDVM
depends on IWLWIFI
default IWLWIFI
help
This is the driver supporting the DVM firmware which is
currently the only firmware available for existing devices.
This is the driver that supports the DVM firmware which is
used by most existing devices (with the exception of 7260
and 3160).
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
depends on IWLWIFI
help
This is the driver supporting the MVM firmware which is
currently only available for 7000 series devices.
Say yes if you have such a device.
This is the driver that supports the MVM firmware which is
currently only available for 7260 and 3160 devices.
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR


@ -87,7 +87,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
priv->lib->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc) ||
skb->protocol == cpu_to_be16(ETH_P_PAE)))
info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
tx_flags |= TX_CMD_FLG_IGNORE_BT;


@ -145,6 +145,7 @@ do { \
#define IWL_DL_RX 0x01000000
#define IWL_DL_ISR 0x02000000
#define IWL_DL_HT 0x04000000
#define IWL_DL_EXTERNAL 0x08000000
/* 0xF0000000 - 0x10000000 */
#define IWL_DL_11H 0x10000000
#define IWL_DL_STATS 0x20000000
@ -153,6 +154,7 @@ do { \
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)


@ -33,10 +33,11 @@
static inline bool iwl_trace_data(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data(hdr->frame_control))
return skb->protocol != cpu_to_be16(ETH_P_PAE);
return false;
if (!ieee80211_is_data(hdr->frame_control))
return false;
return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
}
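The rewritten iwl_trace_data() keys off the per-frame mac80211 control flag instead of comparing skb->protocol against ETH_P_PAE, so any frame mac80211 marks as a port-control protocol frame (not just EAPOL over Ethernet) is excluded from data tracing. A side-by-side sketch of the two checks, illustrative only:

/* old: exclude only frames with the EAPOL ethertype */
bool is_pae = skb->protocol == cpu_to_be16(ETH_P_PAE);
/* new: exclude whatever mac80211 flagged as a port-control frame */
bool is_port_ctrl = info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO;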
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,


@ -843,7 +843,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
int i;
bool load_module = false;
fw->ucode_capa.max_probe_length = 200;
fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
@ -1032,8 +1032,10 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
int ret;
drv = kzalloc(sizeof(*drv), GFP_KERNEL);
if (!drv)
return NULL;
if (!drv) {
ret = -ENOMEM;
goto err;
}
drv->trans = trans;
drv->dev = trans->dev;
@ -1078,7 +1080,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
err_free_drv:
#endif
kfree(drv);
err:
return ERR_PTR(ret);
}
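With the hunk above, iwl_drv_start() reports allocation failure as ERR_PTR(-ENOMEM) through the common err label instead of returning NULL. A sketch of the caller-side pattern this implies (caller code assumed, not part of this hunk):

struct iwl_drv *drv = iwl_drv_start(trans, cfg);
if (IS_ERR(drv))
	return PTR_ERR(drv);	/* propagates -ENOMEM and friends */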


@ -76,6 +76,7 @@
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
* @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
* @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
* @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
* @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
* (rather than two) IPv6 addresses
* @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
@ -88,6 +89,7 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
};
@ -97,6 +99,9 @@ enum iwl_ucode_tlv_flag {
#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
/* The default max probe length if not specified by the firmware file */
#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
/**
* enum iwl_ucode_type
*


@ -67,5 +67,14 @@
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
#define IWL_MVM_PS_SNOOZE_WINDOW 50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
#endif /* __MVM_CONSTANTS_H */


@ -1109,73 +1109,16 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return __iwl_mvm_suspend(hw, wowlan, false);
}
static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_wowlan_status *status)
{
u32 base = mvm->error_event_table;
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
u32 error_id;
} err_info;
struct sk_buff *pkt = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
struct iwl_wowlan_status *status;
u32 reasons;
int ret, len;
struct sk_buff *pkt = NULL;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
IWL_INFO(mvm, "error table is valid (%d)\n",
err_info.valid);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
wakeup.rfkill_release = true;
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
return;
}
/* only for tracing for now */
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
if (ret)
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "failed to query status (%d)\n", ret);
return;
}
/* RF-kill already asserted again... */
if (!cmd.resp_pkt)
return;
len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out;
}
status = (void *)cmd.resp_pkt->data;
if (len - sizeof(struct iwl_cmd_header) !=
sizeof(*status) +
ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out;
}
reasons = le32_to_cpu(status->wakeup_reasons);
u32 reasons = le32_to_cpu(status->wakeup_reasons);
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
wakeup_report = NULL;
@ -1238,6 +1181,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
pktsize -= hdrlen;
if (ieee80211_has_protected(hdr->frame_control)) {
/*
* This is unlocked and using gtk_i(c)vlen,
* but since everything is under RTNL still
* that's not really a problem - changing
* it would be difficult.
*/
if (is_multicast_ether_addr(hdr->addr1)) {
ivlen = mvm->gtk_ivlen;
icvlen += mvm->gtk_icvlen;
@ -1288,9 +1237,82 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
report:
ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
kfree_skb(pkt);
}
out:
/* releases the MVM mutex */
static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
u32 base = mvm->error_event_table;
struct error_table_start {
/* cf. struct iwl_error_event_table */
u32 valid;
u32 error_id;
} err_info;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_SYNC | CMD_WANT_SKB,
};
struct iwl_wowlan_status *status;
int ret, len;
iwl_trans_read_mem_bytes(mvm->trans, base,
&err_info, sizeof(err_info));
if (err_info.valid) {
IWL_INFO(mvm, "error table is valid (%d)\n",
err_info.valid);
if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
ieee80211_report_wowlan_wakeup(vif, &wakeup,
GFP_KERNEL);
}
goto out_unlock;
}
/* only for tracing for now */
ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
if (ret)
IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "failed to query status (%d)\n", ret);
goto out_unlock;
}
/* RF-kill already asserted again... */
if (!cmd.resp_pkt)
goto out_unlock;
len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
status = (void *)cmd.resp_pkt->data;
if (len - sizeof(struct iwl_cmd_header) !=
sizeof(*status) +
ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
goto out_free_resp;
}
/* now we have all the data we need, unlock to avoid mac80211 issues */
mutex_unlock(&mvm->mutex);
iwl_mvm_report_wakeup_reasons(mvm, vif, status);
iwl_free_resp(&cmd);
return;
out_free_resp:
iwl_free_resp(&cmd);
out_unlock:
mutex_unlock(&mvm->mutex);
}
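The split above exists so the wakeup report reaches mac80211 only after the mvm mutex is dropped. Reduced to its shape (query_fw_status() is a hypothetical stand-in for the WOWLAN_GET_STATUSES handling; the function is entered with the mutex already held by the caller):

/* entered with mvm->mutex held */
status = query_fw_status(mvm);
mutex_unlock(&mvm->mutex);	/* drop the lock before calling back into mac80211 */
iwl_mvm_report_wakeup_reasons(mvm, vif, status);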
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@ -1347,10 +1369,13 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_read_d3_sram(mvm);
iwl_mvm_query_wakeup_reasons(mvm, vif);
/* has unlocked the mutex, so skip that */
goto out;
out_unlock:
mutex_unlock(&mvm->mutex);
out:
if (!test && vif)
ieee80211_resume_disconnect(vif);


@ -352,6 +352,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
dbgfs_pm->lprx_rssi_threshold = val;
break;
case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
dbgfs_pm->snooze_ena = val;
break;
}
}
@ -405,6 +409,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
POWER_LPRX_RSSI_THRESHOLD_MIN)
return -EINVAL;
param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
} else if (!strncmp("snooze_enable=", buf, 14)) {
if (sscanf(buf + 14, "%d", &val) != 1)
return -EINVAL;
param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
} else {
return -EINVAL;
}
@ -424,7 +432,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->dbgfs_data;
char buf[256];
char buf[512];
int bufsz = sizeof(buf);
int pos;
@ -895,10 +903,7 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
} else {
if (mvmvif->bf_enabled)
ret = iwl_mvm_enable_beacon_filter(mvm, vif);
else
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
ret = iwl_mvm_enable_beacon_filter(mvm, vif);
}
mutex_unlock(&mvm->mutex);
@ -923,7 +928,7 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
};
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
if (mvmvif->bf_enabled)
if (mvmvif->bf_data.bf_enabled)
cmd.bf_enable_beacon_filter = cpu_to_le32(1);
else
cmd.bf_enable_beacon_filter = 0;


@ -155,8 +155,12 @@ struct iwl_powertable_cmd {
* @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
* Default: 80dbm
* @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
* @snooze_interval: TBD
* @snooze_window: TBD
* @snooze_interval: Maximum time between attempts to retrieve buffered data
* from the AP [msec]
* @snooze_window: A window of time in which PBW snoozing ensures that all
* packets are received. It is also the minimum time from the last
* received unicast RX packet before the client stops snoozing
* for data. [msec]
* @snooze_step: TBD
* @qndp_tid: TID the client shall use for uAPSD QNDP triggers
* @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
@ -164,10 +168,10 @@ struct iwl_powertable_cmd {
* Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
* @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
* values.
* @heavy_traffic_thr_tx_pkts: TX threshold measured in number of packets
* @heavy_traffic_thr_rx_pkts: RX threshold measured in number of packets
* @heavy_traffic_thr_tx_load: TX threshold measured in load's percentage
* @heavy_traffic_thr_rx_load: RX threshold measured in load's percentage
* @heavy_tx_thld_packets: TX threshold measured in number of packets
* @heavy_rx_thld_packets: RX threshold measured in number of packets
* @heavy_tx_thld_percentage: TX threshold measured in load's percentage
* @heavy_rx_thld_percentage: RX threshold measured in load's percentage
* @limited_ps_threshold:
*/
struct iwl_mac_power_cmd {
@ -189,10 +193,10 @@ struct iwl_mac_power_cmd {
u8 qndp_tid;
u8 uapsd_ac_flags;
u8 uapsd_max_sp;
u8 heavy_traffic_threshold_tx_packets;
u8 heavy_traffic_threshold_rx_packets;
u8 heavy_traffic_threshold_tx_percentage;
u8 heavy_traffic_threshold_rx_percentage;
u8 heavy_tx_thld_packets;
u8 heavy_rx_thld_packets;
u8 heavy_tx_thld_percentage;
u8 heavy_rx_thld_percentage;
u8 limited_ps_threshold;
u8 reserved;
} __packed;
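A hedged sketch of how the uAPSD fields above might be filled for a client with uAPSD negotiated on the VO access category only; it mirrors the per-AC loop added later in this series, and the rest of the command setup is elided:

cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
cmd->uapsd_ac_flags |= BIT(IEEE80211_AC_VO);
cmd->qndp_tid = 6;			/* highest TID carried on VO */
cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;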


@ -499,71 +499,79 @@ enum iwl_time_event_type {
TE_MAX
}; /* MAC_EVENT_TYPE_API_E_VER_1 */
/* Time Event dependencies: none, on another TE, or in a specific time */
enum {
TE_INDEPENDENT = 0,
TE_DEP_OTHER = 1,
TE_DEP_TSF = 2,
TE_EVENT_SOCIOPATHIC = 4,
}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
/*
* Supported Time event notifications configuration.
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
* notification for monolithic events.
*
* @TE_NOTIF_NONE: no notifications
* @TE_NOTIF_HOST_EVENT_START: request/receive notification on event start
* @TE_NOTIF_HOST_EVENT_END: request/receive notification on event end
* @TE_NOTIF_INTERNAL_EVENT_START: internal FW use
* @TE_NOTIF_INTERNAL_EVENT_END: internal FW use.
* @TE_NOTIF_HOST_FRAG_START: request/receive notification on frag start
* @TE_NOTIF_HOST_FRAG_END: request/receive notification on frag end
* @TE_NOTIF_INTERNAL_FRAG_START: internal FW use.
* @TE_NOTIF_INTERNAL_FRAG_END: internal FW use.
*/
enum {
TE_NOTIF_NONE = 0,
TE_NOTIF_HOST_EVENT_START = 0x1,
TE_NOTIF_HOST_EVENT_END = 0x2,
TE_NOTIF_INTERNAL_EVENT_START = 0x4,
TE_NOTIF_INTERNAL_EVENT_END = 0x8,
TE_NOTIF_HOST_FRAG_START = 0x10,
TE_NOTIF_HOST_FRAG_END = 0x20,
TE_NOTIF_INTERNAL_FRAG_START = 0x40,
TE_NOTIF_INTERNAL_FRAG_END = 0x80
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
/* Time event - defines for command API v1 */
/*
* @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
* @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only
* the first fragment is scheduled.
* @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only
* the first 2 fragments are scheduled.
* @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number
* of fragments are valid.
* @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
* @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
* the first fragment is scheduled.
* @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
* the first 2 fragments are scheduled.
* @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
* number of fragments are valid.
*
* Other than the constants defined above, specifying a fragmentation value 'x'
* means that the event can be fragmented but only the first 'x' will be
* scheduled.
*/
enum {
TE_FRAG_NONE = 0,
TE_FRAG_SINGLE = 1,
TE_FRAG_DUAL = 2,
TE_FRAG_ENDLESS = 0xffffffff
TE_V1_FRAG_NONE = 0,
TE_V1_FRAG_SINGLE = 1,
TE_V1_FRAG_DUAL = 2,
TE_V1_FRAG_ENDLESS = 0xffffffff
};
/* Repeat the time event endlessly (until removed) */
#define TE_REPEAT_ENDLESS (0xffffffff)
/* If a Time Event has bounded repetitions, this is the maximal value */
#define TE_REPEAT_MAX_MSK (0x0fffffff)
/* If a Time Event can be fragmented, this is the max number of fragments */
#define TE_FRAG_MAX_MSK (0x0fffffff)
#define TE_V1_FRAG_MAX_MSK 0x0fffffff
/* Repeat the time event endlessly (until removed) */
#define TE_V1_REPEAT_ENDLESS 0xffffffff
/* If a Time Event has bounded repetitions, this is the maximal value */
#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
/* Time Event dependencies: none, on another TE, or in a specific time */
enum {
TE_V1_INDEPENDENT = 0,
TE_V1_DEP_OTHER = BIT(0),
TE_V1_DEP_TSF = BIT(1),
TE_V1_EVENT_SOCIOPATHIC = BIT(2),
}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
/*
* @TE_V1_NOTIF_NONE: no notifications
* @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
* @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end
* @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
* @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
* @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
* @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
* @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
* @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
*
* Supported Time event notifications configuration.
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
* notification for monolithic events.
*/
enum {
TE_V1_NOTIF_NONE = 0,
TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
/**
* struct iwl_time_event_cmd - configuring Time Events
* struct iwl_time_event_cmd_v1 - configuring Time Events
* with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also the v2 layout;
* which version is used is determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
* @id_and_color: ID and color of the relevant MAC
* @action: action to perform, one of FW_CTXT_ACTION_*
@ -578,12 +586,13 @@ enum {
* @interval_reciprocal: 2^32 / interval
* @duration: duration of event in TU
* @repeat: how many repetitions to do, can be TE_V1_REPEAT_ENDLESS
* @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
* @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
* and TE_V1_EVENT_SOCIOPATHIC
* @is_present: 0 or 1, are we present or absent during the Time Event
* @max_frags: maximal number of fragments the Time Event can be divided to
* @notify: notifications using TE_NOTIF_* (whom to notify when)
* @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
*/
struct iwl_time_event_cmd {
struct iwl_time_event_cmd_v1 {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
@ -602,6 +611,123 @@ struct iwl_time_event_cmd {
__le32 notify;
} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
/* Time event - defines for command API v2 */
/*
* @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
* @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
* the first fragment is scheduled.
* @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
* the first 2 fragments are scheduled.
* @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
* number of fragments are valid.
*
* Other than the constants defined above, specifying a fragmentation value 'x'
* means that the event can be fragmented but only the first 'x' will be
* scheduled.
*/
enum {
TE_V2_FRAG_NONE = 0,
TE_V2_FRAG_SINGLE = 1,
TE_V2_FRAG_DUAL = 2,
TE_V2_FRAG_MAX = 0xfe,
TE_V2_FRAG_ENDLESS = 0xff
};
/* Repeat the time event endlessly (until removed) */
#define TE_V2_REPEAT_ENDLESS 0xff
/* If a Time Event has bounded repetitions, this is the maximal value */
#define TE_V2_REPEAT_MAX 0xfe
#define TE_V2_PLACEMENT_POS 12
#define TE_V2_ABSENCE_POS 15
/* Time event policy values (for time event cmd api v2)
* A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
* notification for monolithic events.
*
* @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
* @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
* @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
* @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
* @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
* @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
* @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
* @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
* @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
* @TE_V2_DEP_OTHER: depends on another time event
* @TE_V2_DEP_TSF: depends on a specific time
* @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
* @TE_V2_ABSENCE: are we present or absent during the Time Event.
*/
enum {
TE_V2_DEFAULT_POLICY = 0x0,
/* notifications (event start/stop, fragment start/stop) */
TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
TE_V2_NOTIF_MSK = 0xff,
/* placement characteristics */
TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
/* are we present or absent during the Time Event. */
TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
};
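Since v2 folds notifications, placement and absence into a single policy word, a value is built by OR-ing the bits above. For example, a sketch of a host-notified, TSF-scheduled event during which the MAC is absent (the field is a little-endian __le16 in the command):

__le16 policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
			    TE_V2_NOTIF_HOST_EVENT_END |
			    TE_V2_DEP_TSF |	/* scheduled at a specific time */
			    TE_V2_ABSENCE);	/* MAC absent during the event */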
/**
* struct iwl_time_event_cmd_v2 - configuring Time Events
* with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also the v1 layout;
* which version is used is determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
* @id_and_color: ID and color of the relevant MAC
* @action: action to perform, one of FW_CTXT_ACTION_*
* @id: this field has two meanings, depending on the action:
* If the action is ADD, then it means the type of event to add.
* For all other actions it is the unique event ID assigned when the
* event was added by the FW.
* @apply_time: When to start the Time Event (in GP2)
* @max_delay: maximum delay to event's start (apply time), in TU
* @depends_on: the unique ID of the event we depend on (if any)
* @interval: interval between repetitions, in TU
* @duration: duration of event in TU
* @repeat: how many repetitions to do, can be TE_V2_REPEAT_ENDLESS
* @max_frags: maximal number of fragments the Time Event can be divided to
* @policy: defines whether uCode shall notify the host or other uCode modules
* on event and/or fragment start and/or end;
* built from the TE_V2_NOTIF_* notification bits, the placement bits
* TE_V2_DEP_OTHER, TE_V2_DEP_TSF and TE_V2_EVENT_SOCIOPATHIC,
* and the presence bit TE_V2_ABSENCE
*/
struct iwl_time_event_cmd_v2 {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
__le32 id;
/* MAC_TIME_EVENT_DATA_API_S_VER_2 */
__le32 apply_time;
__le32 max_delay;
__le32 depends_on;
__le32 interval;
__le32 duration;
u8 repeat;
u8 max_frags;
__le16 policy;
} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
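Note that v2 narrows @repeat and @max_frags from 32 bits to 8 and merges the v1 dep_policy/is_present/notify words into @policy. A hypothetical sketch of translating the repeat field between the layouts (helper name invented; the driver's actual conversion code is not part of this hunk):

static u8 te_repeat_v1_to_v2(__le32 repeat_v1)
{
	u32 r = le32_to_cpu(repeat_v1);

	if (r == TE_V1_REPEAT_ENDLESS)
		return TE_V2_REPEAT_ENDLESS;	/* 0xffffffff becomes 0xff */
	return min_t(u32, r, TE_V2_REPEAT_MAX);	/* clamp bounded counts to 0xfe */
}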
/**
* struct iwl_time_event_resp - response structure to iwl_time_event_cmd
* @status: bit 0 indicates success, all others specify errors
@ -1195,7 +1321,7 @@ struct mvm_statistics_general {
struct mvm_statistics_general_common common;
__le32 beacon_filtered;
__le32 missed_beacons;
__s8 beacon_filter_everage_energy;
__s8 beacon_filter_average_energy;
__s8 beacon_filter_reason;
__s8 beacon_filter_current_energy;
__s8 beacon_filter_reserved;


@ -155,7 +155,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_TIMING_BEACON_ONLY |
IEEE80211_HW_CONNECTION_MONITOR |
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
IEEE80211_HW_SUPPORTS_STATIC_SMPS |
IEEE80211_HW_SUPPORTS_UAPSD;
hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@ -190,6 +191,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_remain_on_channel_duration = 10000;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
/* Extract MAC address */
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@ -577,7 +580,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
mvm->bf_allowed_vif = mvmvif;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
/*
@ -617,7 +621,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
out_free_bf:
if (mvm->bf_allowed_vif == mvmvif) {
mvm->bf_allowed_vif = NULL;
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI);
}
out_remove_mac:
mvmvif->phy_ctxt = NULL;
@ -683,7 +688,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->bf_allowed_vif == mvmvif) {
mvm->bf_allowed_vif = NULL;
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_CQM_RSSI);
}
iwl_mvm_vif_dbgfs_clean(mvm, vif);
@ -801,6 +807,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
}
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
/* Workaround for FW bug, otherwise FW disables device
* power save upon disassociation
@ -817,7 +827,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
} else if (changes & BSS_CHANGED_PS) {
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
@ -827,6 +837,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
if (changes & BSS_CHANGED_CQM) {
IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
/* reset cqm events tracking */
mvmvif->bf_data.last_cqm_event = 0;
ret = iwl_mvm_update_beacon_filter(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update CQM thresholds\n");
}
}
static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)


@ -153,6 +153,11 @@ enum iwl_power_scheme {
};
#define IWL_CONN_MAX_LISTEN_INTERVAL 70
#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
struct iwl_mvm_power_ops {
int (*power_update_mode)(struct iwl_mvm *mvm,
@ -175,6 +180,7 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
};
struct iwl_dbgfs_pm {
@ -186,6 +192,7 @@ struct iwl_dbgfs_pm {
bool disable_power_off;
bool lprx_ena;
u32 lprx_rssi_threshold;
bool snooze_ena;
int mask;
};
@ -227,6 +234,21 @@ enum iwl_mvm_smps_type_request {
NUM_IWL_MVM_SMPS_REQ,
};
/**
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
* @ba_enabled: indicates if beacon abort is enabled
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
*/
struct iwl_mvm_vif_bf_data {
bool bf_enabled;
bool ba_enabled;
s8 ave_beacon_signal;
s8 last_cqm_event;
};
/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @id: between 0 and 3
@ -252,8 +274,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_active;
bool monitor_active;
/* indicate whether beacon filtering is enabled */
bool bf_enabled;
struct iwl_mvm_vif_bf_data bf_data;
u32 ap_beacon_time;
@ -754,6 +775,8 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
struct iwl_beacon_filter_cmd *cmd);
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool enable);
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,


@ -110,6 +110,23 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
return ret;
}
static
void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (vif->bss_conf.cqm_rssi_thold) {
cmd->bf_energy_delta =
cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
/* fw uses an absolute value for this */
cmd->bf_roaming_state =
cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
}
cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
}
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool enable)
{
@ -120,12 +137,14 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
.ba_enable_beacon_abort = cpu_to_le32(enable),
};
if (!mvmvif->bf_enabled)
if (!mvmvif->bf_data.bf_enabled)
return 0;
if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
mvmvif->bf_data.ba_enabled = enable;
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
}
@ -140,17 +159,30 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
le16_to_cpu(cmd->keep_alive_seconds));
if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
le32_to_cpu(cmd->rx_data_timeout));
IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
le32_to_cpu(cmd->tx_data_timeout));
if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
cmd->skip_dtim_periods);
if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
cmd->lprx_rssi_threshold);
if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
IWL_DEBUG_POWER(mvm, "Disable power management\n");
return;
}
IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
le32_to_cpu(cmd->rx_data_timeout));
IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
le32_to_cpu(cmd->tx_data_timeout));
if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
cmd->skip_dtim_periods);
if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
cmd->lprx_rssi_threshold);
if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
le32_to_cpu(cmd->rx_data_timeout_uapsd));
IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
le32_to_cpu(cmd->tx_data_timeout_uapsd));
IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
}
}
@ -166,6 +198,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
enum ieee80211_ac_numbers ac;
bool tid_found = false;
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
mvmvif->color));
@ -235,6 +269,63 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
}
for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
if (!mvmvif->queue_params[ac].uapsd)
continue;
cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
cmd->uapsd_ac_flags |= BIT(ac);
/* QNDP TID - the highest TID with no admission control */
if (!tid_found && !mvmvif->queue_params[ac].acm) {
tid_found = true;
switch (ac) {
case IEEE80211_AC_VO:
cmd->qndp_tid = 6;
break;
case IEEE80211_AC_VI:
cmd->qndp_tid = 5;
break;
case IEEE80211_AC_BE:
cmd->qndp_tid = 0;
break;
case IEEE80211_AC_BK:
cmd->qndp_tid = 1;
break;
}
}
}
if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
cmd->rx_data_timeout_uapsd =
cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
cmd->tx_data_timeout_uapsd =
cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
BIT(IEEE80211_AC_VI) |
BIT(IEEE80211_AC_BE) |
BIT(IEEE80211_AC_BK))) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
cmd->snooze_interval =
cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
cmd->snooze_window =
(mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
}
cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
cmd->heavy_tx_thld_packets =
IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
cmd->heavy_rx_thld_packets =
IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
cmd->heavy_tx_thld_percentage =
IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
cmd->heavy_rx_thld_percentage =
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
cmd->keep_alive_seconds =
@ -263,6 +354,14 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
}
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
if (mvmvif->dbgfs_pm.snooze_ena)
cmd->flags |=
cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
else
cmd->flags &=
cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
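Two decisions in the uAPSD block above deserve a gloss. The QNDP TID is taken from the highest-priority AC that is both uAPSD-enabled and free of admission control, mapped to that AC's default TID (VO to 6, VI to 5, BE to 0, BK to 1); snooze mode is enabled only when all four ACs are delivery-enabled. A self-contained sketch of the selection, assuming mac80211's AC ordering (VO, VI, BE, BK); qndp_tid() is a stand-in, not a driver function.

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, AC_NUM };

/* Pick the QNDP TID the way the hunk above does: the first
 * uAPSD-enabled AC, in priority order, without admission control wins. */
static int qndp_tid(const int uapsd[AC_NUM], const int acm[AC_NUM])
{
	/* default TID per AC: VO=6, VI=5, BE=0, BK=1 */
	static const int ac_to_tid[AC_NUM] = { 6, 5, 0, 1 };
	int ac;

	for (ac = AC_VO; ac <= AC_BK; ac++)
		if (uapsd[ac] && !acm[ac])
			return ac_to_tid[ac];
	return -1; /* no eligible AC */
}

int main(void)
{
	int uapsd[AC_NUM] = { 1, 1, 1, 1 };
	int acm[AC_NUM]   = { 1, 0, 0, 0 }; /* VO requires admission control */
	int snooze = uapsd[AC_VO] && uapsd[AC_VI] &&
		     uapsd[AC_BE] && uapsd[AC_BK];

	/* VO is skipped because of ACM, so VI's TID 5 is chosen */
	printf("qndp_tid=%d snooze=%d\n", qndp_tid(uapsd, acm), snooze);
	return 0;
}

The snooze window itself then depends on which firmware image is running: IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW for the WoWLAN ucode, IWL_MVM_PS_SNOOZE_WINDOW otherwise.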
@ -342,8 +441,6 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
(cmd.flags &
cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
0 : 1);
pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
cmd.skip_dtim_periods);
pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
iwlmvm_mod_params.power_scheme);
pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@ -356,14 +453,64 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
(cmd.flags &
cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
1 : 0);
pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
le32_to_cpu(cmd.rx_data_timeout));
pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
le32_to_cpu(cmd.tx_data_timeout));
pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
cmd.skip_dtim_periods);
if (!(cmd.flags &
cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
pos += scnprintf(buf+pos, bufsz-pos,
"rx_data_timeout = %d\n",
le32_to_cpu(cmd.rx_data_timeout));
pos += scnprintf(buf+pos, bufsz-pos,
"tx_data_timeout = %d\n",
le32_to_cpu(cmd.tx_data_timeout));
}
if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
pos += scnprintf(buf+pos, bufsz-pos,
"lprx_rssi_threshold = %d\n",
cmd.lprx_rssi_threshold);
if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
pos +=
scnprintf(buf+pos, bufsz-pos,
"rx_data_timeout_uapsd = %d\n",
le32_to_cpu(cmd.rx_data_timeout_uapsd));
pos +=
scnprintf(buf+pos, bufsz-pos,
"tx_data_timeout_uapsd = %d\n",
le32_to_cpu(cmd.tx_data_timeout_uapsd));
pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
cmd.qndp_tid);
pos += scnprintf(buf+pos, bufsz-pos,
"uapsd_ac_flags = 0x%x\n",
cmd.uapsd_ac_flags);
pos += scnprintf(buf+pos, bufsz-pos,
"uapsd_max_sp = %d\n",
cmd.uapsd_max_sp);
pos += scnprintf(buf+pos, bufsz-pos,
"heavy_tx_thld_packets = %d\n",
cmd.heavy_tx_thld_packets);
pos += scnprintf(buf+pos, bufsz-pos,
"heavy_rx_thld_packets = %d\n",
cmd.heavy_rx_thld_packets);
pos += scnprintf(buf+pos, bufsz-pos,
"heavy_tx_thld_percentage = %d\n",
cmd.heavy_tx_thld_percentage);
pos += scnprintf(buf+pos, bufsz-pos,
"heavy_rx_thld_percentage = %d\n",
cmd.heavy_rx_thld_percentage);
pos +=
scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
(cmd.flags &
cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
1 : 0);
}
if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
pos += scnprintf(buf+pos, bufsz-pos,
"snooze_interval = %d\n",
cmd.snooze_interval);
pos += scnprintf(buf+pos, bufsz-pos,
"snooze_window = %d\n",
cmd.snooze_window);
}
}
return pos;
}
@ -417,11 +564,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
mvmvif->bf_enabled = true;
mvmvif->bf_data.bf_enabled = true;
return ret;
}
@ -440,11 +588,22 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
mvmvif->bf_enabled = false;
mvmvif->bf_data.bf_enabled = false;
return ret;
}
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (!mvmvif->bf_data.bf_enabled)
return 0;
return iwl_mvm_enable_beacon_filter(mvm, vif);
}
const struct iwl_mvm_power_ops pm_mac_ops = {
.power_update_mode = iwl_mvm_power_mac_update_mode,
.power_disable = iwl_mvm_power_mac_disable,

View File

@ -82,41 +82,35 @@ static const u8 ant_toggle_lookup[] = {
[ANT_ABC] = ANT_ABC,
};
#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
IWL_RATE_SISO_##s##M_PLCP, \
IWL_RATE_MIMO2_##s##M_PLCP,\
IWL_RATE_MIMO3_##s##M_PLCP,\
IWL_RATE_##r##M_IEEE, \
IWL_RATE_##ip##M_INDEX, \
IWL_RATE_##in##M_INDEX, \
IWL_RATE_##rp##M_INDEX, \
IWL_RATE_##rn##M_INDEX, \
IWL_RATE_##pp##M_INDEX, \
IWL_RATE_##np##M_INDEX }
IWL_RATE_##rn##M_INDEX }
/*
* Parameter order:
* rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
* rate, ht rate, prev rate, next rate
*
* If there isn't a valid next or previous rate, INV is used, which
* maps to IWL_RATE_INVALID
*
*/
static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /* 5.5mbps */
IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */
IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /* 5.5mbps */
IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */
IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */
IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */
IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */
IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */
IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */
IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */
IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */
IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */
/* FIXME:RS: ^^ should be INV (legacy) */
};
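With three-stream rates gone, IWL_DECLARE_RATE_INFO drops from eight parameters to four: each iwl_rs_rate_info entry now holds only the legacy PLCP code, the SISO and MIMO2 HT PLCP codes, the IEEE rate value, and the previous/next rate indices. As a concrete example, the 11 Mbps line above token-pastes to roughly the following initializer (shown expanded, not as standalone code):

/* IWL_DECLARE_RATE_INFO(11, INV, 9, 12) expands to: */
[IWL_RATE_11M_INDEX] = { IWL_RATE_11M_PLCP,
			 IWL_RATE_SISO_INVM_PLCP, /* CCK rate: no HT twin */
			 IWL_RATE_MIMO2_INVM_PLCP,
			 IWL_RATE_11M_IEEE,
			 IWL_RATE_9M_INDEX,   /* previous rate */
			 IWL_RATE_12M_INDEX }, /* next rate */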
@ -134,9 +128,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
if (rate_n_flags & RATE_MCS_HT_MSK) {
idx = rs_extract_rate(rate_n_flags);
if (idx >= IWL_RATE_MIMO3_6M_PLCP)
idx = idx - IWL_RATE_MIMO3_6M_PLCP;
else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
if (idx >= IWL_RATE_MIMO2_6M_PLCP)
idx = idx - IWL_RATE_MIMO2_6M_PLCP;
idx += IWL_FIRST_OFDM_RATE;
@ -168,10 +161,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u32 *rate_n_flags, int index);
u32 *rate_n_flags);
#else
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u32 *rate_n_flags, int index)
u32 *rate_n_flags)
{}
#endif
@ -218,20 +211,6 @@ static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
};
static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
{0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
};
/* mbps, mcs */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
@ -279,7 +258,6 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
@ -459,7 +437,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
else if (is_mimo2(tbl->lq_type))
rate_n_flags |= iwl_rates[index].plcp_mimo2;
else
rate_n_flags |= iwl_rates[index].plcp_mimo3;
WARN_ON_ONCE(1);
} else {
IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
}
@ -497,7 +475,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
u8 mcs;
memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
if (*rate_idx == IWL_RATE_INVALID) {
@ -536,12 +514,8 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
if (num_of_ant == 2)
tbl->lq_type = LQ_MIMO2;
/* MIMO3 */
} else {
if (num_of_ant == 3) {
tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
tbl->lq_type = LQ_MIMO3;
}
WARN_ON_ONCE(num_of_ant == 3);
}
}
return 0;
@ -607,10 +581,10 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
} else {
if (is_siso(rate_type))
return lq_sta->active_siso_rate;
else if (is_mimo2(rate_type))
else {
WARN_ON_ONCE(!is_mimo2(rate_type));
return lq_sta->active_mimo2_rate;
else
return lq_sta->active_mimo3_rate;
}
}
}
@ -985,7 +959,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
}
/* Choose among many HT tables depending on number of streams
* (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
* (SISO/MIMO2), channel width (20/40), SGI, and aggregation
* status */
if (is_siso(tbl->lq_type) && !tbl->is_ht40)
ht_tbl_pointer = expected_tpt_siso20MHz;
@ -993,12 +967,10 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
ht_tbl_pointer = expected_tpt_siso40MHz;
else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
else if (is_mimo2(tbl->lq_type))
else {
WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
ht_tbl_pointer = expected_tpt_mimo2_40MHz;
else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
ht_tbl_pointer = expected_tpt_mimo3_20MHz;
else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
ht_tbl_pointer = expected_tpt_mimo3_40MHz;
}
if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
tbl->expected_tpt = ht_tbl_pointer[0];
@ -1169,58 +1141,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
return 0;
}
/*
* Set up search table for MIMO3
*/
static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta,
struct iwl_scale_tbl_info *tbl, int index)
{
u16 rate_mask;
s32 rate;
s8 is_green = lq_sta->is_green;
if (!sta->ht_cap.ht_supported)
return -1;
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
return -1;
/* Need both Tx chains/antennas to support MIMO */
if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 3)
return -1;
IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
tbl->lq_type = LQ_MIMO3;
tbl->action = 0;
tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
rate_mask = lq_sta->active_mimo3_rate;
if (iwl_is_ht40_tx_allowed(sta))
tbl->is_ht40 = 1;
else
tbl->is_ht40 = 0;
rs_set_expected_tpt_table(lq_sta, tbl);
rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
rate, rate_mask);
if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
rate, rate_mask);
return -1;
}
tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
tbl->current_rate, is_green);
return 0;
}
/*
* Set up search table for SISO
*/
@ -1330,21 +1250,14 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
}
break;
case IWL_LEGACY_SWITCH_MIMO2_AB:
case IWL_LEGACY_SWITCH_MIMO2_AC:
case IWL_LEGACY_SWITCH_MIMO2_BC:
case IWL_LEGACY_SWITCH_MIMO2:
IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
/* Set up search table to try MIMO */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
search_tbl->ant_type = ANT_AB;
else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
search_tbl->ant_type = ANT_AC;
else
search_tbl->ant_type = ANT_BC;
search_tbl->ant_type = ANT_AB;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@ -1357,30 +1270,11 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
goto out;
}
break;
case IWL_LEGACY_SWITCH_MIMO3_ABC:
IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
/* Set up search table to try MIMO3 */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
search_tbl->ant_type = ANT_ABC;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
search_tbl, index);
if (!ret) {
lq_sta->action_counter = 0;
goto out;
}
break;
default:
WARN_ON_ONCE(1);
}
tbl->action++;
if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@ -1392,7 +1286,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@ -1427,7 +1321,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
tbl->action = IWL_SISO_SWITCH_MIMO2;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@ -1469,19 +1363,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
goto out;
}
break;
case IWL_SISO_SWITCH_MIMO2_AB:
case IWL_SISO_SWITCH_MIMO2_AC:
case IWL_SISO_SWITCH_MIMO2_BC:
case IWL_SISO_SWITCH_MIMO2:
IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
search_tbl->ant_type = ANT_AB;
else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
search_tbl->ant_type = ANT_AC;
else
search_tbl->ant_type = ANT_BC;
search_tbl->ant_type = ANT_AB;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@ -1522,24 +1409,11 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
index, is_green);
update_search_tbl_counter = 1;
goto out;
case IWL_SISO_SWITCH_MIMO3_ABC:
IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
search_tbl->ant_type = ANT_ABC;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
search_tbl, index);
if (!ret)
goto out;
break;
default:
WARN_ON_ONCE(1);
}
tbl->action++;
if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
if (tbl->action > IWL_SISO_SWITCH_GI)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@ -1551,7 +1425,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
if (tbl->action > IWL_SISO_SWITCH_GI)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@ -1592,8 +1466,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
tbl->action == IWL_MIMO2_SWITCH_SISO_C)
if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
default:
@ -1626,7 +1499,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B:
case IWL_MIMO2_SWITCH_SISO_C:
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
/* Set up new search table for SISO */
@ -1634,10 +1506,8 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
search_tbl->ant_type = ANT_A;
else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
search_tbl->ant_type = ANT_B;
else
search_tbl->ant_type = ANT_C;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@ -1680,26 +1550,11 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
index, is_green);
update_search_tbl_counter = 1;
goto out;
case IWL_MIMO2_SWITCH_MIMO3_ABC:
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
search_tbl->ant_type = ANT_ABC;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
search_tbl, index);
if (!ret)
goto out;
break;
default:
WARN_ON_ONCE(1);
}
tbl->action++;
if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
if (tbl->action > IWL_MIMO2_SWITCH_GI)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@ -1710,7 +1565,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
if (tbl->action > IWL_MIMO2_SWITCH_GI)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@ -1718,171 +1573,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
return 0;
}
/*
* Try to switch to new modulation mode from MIMO3
*/
static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
u8 tx_chains_num = num_of_ant(valid_tx_ant);
int ret;
u8 update_search_tbl_counter = 0;
switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
tbl->action == IWL_MIMO3_SWITCH_SISO_C)
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
default:
IWL_ERR(mvm, "Invalid BT load %d",
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
break;
}
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
case IWL_MIMO3_SWITCH_ANTENNA1:
case IWL_MIMO3_SWITCH_ANTENNA2:
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
if (tx_chains_num <= 3)
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO)
break;
memcpy(search_tbl, tbl, sz);
if (rs_toggle_antenna(valid_tx_ant,
&search_tbl->current_rate,
search_tbl))
goto out;
break;
case IWL_MIMO3_SWITCH_SISO_A:
case IWL_MIMO3_SWITCH_SISO_B:
case IWL_MIMO3_SWITCH_SISO_C:
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
/* Set up new search table for SISO */
memcpy(search_tbl, tbl, sz);
if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
search_tbl->ant_type = ANT_A;
else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
search_tbl->ant_type = ANT_B;
else
search_tbl->ant_type = ANT_C;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_siso(mvm, lq_sta, sta,
search_tbl, index);
if (!ret)
goto out;
break;
case IWL_MIMO3_SWITCH_MIMO2_AB:
case IWL_MIMO3_SWITCH_MIMO2_AC:
case IWL_MIMO3_SWITCH_MIMO2_BC:
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
search_tbl->ant_type = ANT_AB;
else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
search_tbl->ant_type = ANT_AC;
else
search_tbl->ant_type = ANT_BC;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
search_tbl, index);
if (!ret)
goto out;
break;
case IWL_MIMO3_SWITCH_GI:
if (!tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_20))
break;
if (tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_40))
break;
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
/* Set up new search table for MIMO */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
/*
* If active table already uses the fastest possible
* modulation (dual stream with short guard interval),
* and it's working well, there's no need to look
* for a better type of modulation!
*/
if (tbl->is_SGI) {
s32 tpt = lq_sta->last_tpt / 100;
if (tpt >= search_tbl->expected_tpt[index])
break;
}
search_tbl->current_rate =
rate_n_flags_from_tbl(mvm, search_tbl,
index, is_green);
update_search_tbl_counter = 1;
goto out;
}
tbl->action++;
if (tbl->action > IWL_MIMO3_SWITCH_GI)
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (tbl->action == start_action)
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_MIMO3_SWITCH_GI)
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/*
* Check whether we should continue using the same modulation mode, or
* begin searching for a new mode, based on:
@ -2289,8 +1979,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
scale_action = 0;
if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
if (lq_sta->last_bt_traffic >
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
/*
@ -2307,8 +1996,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
/* search for a new modulation */
rs_stay_in_table(lq_sta, true);
goto lq_update;
@ -2368,7 +2056,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
else if (is_mimo2(tbl->lq_type))
rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
else
rs_move_mimo3_to_other(mvm, lq_sta, sta, index);
WARN_ON_ONCE(1);
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
@ -2533,11 +2221,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
rate_idx -= IWL_FIRST_OFDM_RATE;
/* 6M and 9M shared same MCS index */
rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
IWL_RATE_MIMO3_6M_PLCP);
if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
IWL_RATE_MIMO3_6M_PLCP)
rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
IWL_RATE_MIMO2_6M_PLCP)
IWL_RATE_MIMO2_6M_PLCP)
rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
@ -2636,16 +2323,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
lq_sta->active_mimo3_rate &= ~((u16)0x2);
lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
IWL_DEBUG_RATE(mvm,
"SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
"SISO-RATE=%X MIMO2-RATE=%X\n",
lq_sta->active_siso_rate,
lq_sta->active_mimo2_rate,
lq_sta->active_mimo3_rate);
lq_sta->active_mimo2_rate);
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
@ -2689,7 +2370,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
/* Override starting rate (index 0) if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Interpret new_rate (rate_n_flags) */
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@ -2736,7 +2417,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
}
/* Override next rate if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Fill next table entry */
lq_cmd->rs_table[index] =
@ -2778,7 +2459,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
use_ht_possible = 0;
/* Override next rate if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Fill next table entry */
lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
@ -2823,7 +2504,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u32 *rate_n_flags, int index)
u32 *rate_n_flags)
{
struct iwl_mvm *mvm;
u8 valid_tx_ant;
@ -2908,8 +2589,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
desc += sprintf(buff+desc, " %s",
(is_siso(tbl->lq_type)) ? "SISO" :
((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
(is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
desc += sprintf(buff+desc, " %s",
(tbl->is_ht40) ? "40MHz" : "20MHz");
desc += sprintf(buff+desc, " %s %s %s\n",
@ -3009,32 +2689,6 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.llseek = default_llseek,
};
static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
char buff[120];
int desc = 0;
if (is_Ht(tbl->lq_type))
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
tbl->expected_tpt[lq_sta->last_txrate_idx]);
else
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
}
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
.read = rs_sta_dbgfs_rate_scale_data_read,
.open = simple_open,
.llseek = default_llseek,
};
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = mvm_sta;
@ -3044,9 +2698,6 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
lq_sta->rs_sta_dbgfs_rate_scale_data_file =
debugfs_create_file("rate_scale_data", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
&lq_sta->tx_agg_tid_en);
@ -3057,7 +2708,6 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
struct iwl_lq_sta *lq_sta = mvm_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
#endif

Some files were not shown because too many files have changed in this diff.