/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>

#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-trans.h"
#include "iwl-shared.h"

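/*
 * Check that an SRAM "RTC data" address reported by the uCode falls inside
 * the valid data window [IWLAGN_RTC_DATA_LOWER_BOUND,
 * IWLAGN_RTC_DATA_UPPER_BOUND); returns non-zero when the address is valid.
 */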
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
		(addr < IWLAGN_RTC_DATA_UPPER_BOUND);
}

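/*
 * Send the current user TX power limit to the uCode. The limit is kept in
 * dBm in priv->tx_power_user_lmt and the command wants half-dBm units, so it
 * is doubled first (e.g. a 15 dBm user limit becomes 30 half-dBm units); if
 * the EEPROM limit (already in half-dBm) is lower, the EEPROM value wins.
 */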
int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->shrd->status),
		      "TX Power requested while scanning!\n"))
		return -EAGAIN;

	/* half dBm need to multiply */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (priv->tx_power_lmt_in_half_dbm &&
	    priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
		/*
		 * For newer devices that use the enhanced/extended tx power
		 * table in EEPROM, the limit is stored in half-dBm. The
		 * driver has to convert it to dBm before reporting it to
		 * mac80211, which can lose 1/2 dBm of resolution: the value
		 * is rounded up, so the reported limit may end up 1/2 dBm
		 * above the regulatory limit. Check for that here and, if
		 * "tx_power_user_lmt" is higher than the EEPROM value (in
		 * half-dBm format), lower the tx power to the EEPROM limit.
		 */
		tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
	}
	tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;

	if (IWL_UCODE_API(nic(priv)->fw.ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return iwl_trans_send_cmd_pdu(trans(priv), tx_ant_cfg_cmd, CMD_SYNC,
			sizeof(tx_power_cmd), &tx_power_cmd);
}

void iwlagn_temperature(struct iwl_priv *priv)
{
	/* store temperature from correct statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
	iwl_tt_handler(priv);
}

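/*
 * Read the calibration header from the EEPROM/OTP image and return its
 * version field.
 */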
u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
{
	struct iwl_eeprom_calib_hdr *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
							EEPROM_CALIB_ALL);
	return hdr->version;
}

/*
 * EEPROM
 */
static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
{
	u16 offset = 0;

	if ((address & INDIRECT_ADDRESS) == 0)
		return address;

	switch (address & INDIRECT_TYPE_MSK) {
	case INDIRECT_HOST:
		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
		break;
	case INDIRECT_GENERAL:
		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
		break;
	case INDIRECT_REGULATORY:
		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
		break;
	case INDIRECT_TXP_LIMIT:
		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
		break;
	case INDIRECT_TXP_LIMIT_SIZE:
		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
		break;
	case INDIRECT_CALIBRATION:
		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
		break;
	case INDIRECT_PROCESS_ADJST:
		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
		break;
	case INDIRECT_OTHERS:
		offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
		break;
	default:
		IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
				address & INDIRECT_TYPE_MSK);
		break;
	}

	/* translate the offset from words to byte */
	return (address & ADDRESS_MSK) + (offset << 1);
}

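/*
 * Resolve a (possibly indirect) EEPROM offset and return a pointer into the
 * cached EEPROM image; an out-of-range address triggers BUG_ON().
 */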
const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
{
	u32 address = eeprom_indirect_address(shrd, offset);
	BUG_ON(address >= shrd->cfg->base_params->eeprom_size);
	return &shrd->eeprom[address];
}

struct iwl_mod_params iwlagn_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	.plcp_check = true,
	.bt_coex_active = true,
	.no_sleep_autoadjust = true,
	.power_level = IWL_POWER_INDEX_1,
	.bt_ch_announce = true,
	.wanted_ucode_alternative = 1,
	.auto_agg = true,
	/* the rest are 0 by default */
};

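/*
 * Translate a uCode rate_n_flags value into a mac80211 rate index: HT rates
 * map directly to the MCS number in the low byte, legacy rates are looked up
 * in iwl_rates[] (with the 5 GHz band offset applied); returns -1 on no match.
 */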
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
	int idx = 0;
	int band_offset = 0;

	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
	if (rate_n_flags & RATE_MCS_HT_MSK) {
		idx = (rate_n_flags & 0xff);
		return idx;
	/* Legacy rate format, search for match in table */
	} else {
		if (band == IEEE80211_BAND_5GHZ)
			band_offset = IWL_FIRST_OFDM_RATE;
		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
				return idx - band_offset;
	}

	return -1;
}

int iwlagn_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		return iwlagn_add_bssid_station(priv, vif_priv->ctx,
						vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
				  vif->bss_conf.bssid);
}

/**
 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
 *
 * pre-requirements:
 *    1. acquire mutex before calling
 *    2. make sure rf is on and not in exit state
 */
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	struct iwl_txfifo_flush_cmd flush_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TXFIFO_FLUSH,
		.len = { sizeof(struct iwl_txfifo_flush_cmd), },
		.flags = CMD_SYNC,
		.data = { &flush_cmd, },
	};

	might_sleep();

	memset(&flush_cmd, 0, sizeof(flush_cmd));
	if (flush_control & BIT(IWL_RXON_CTX_BSS))
		flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
				IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
				IWL_SCD_MGMT_MSK;
	if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
	    (priv->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
		flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
				IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
				IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
				IWL_PAN_SCD_MULTICAST_MSK;

	if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;

	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
		       flush_cmd.fifo_control);
	flush_cmd.flush_control = cpu_to_le16(flush_control);

	return iwl_trans_send_cmd(trans(priv), &cmd);
}

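/*
 * User/debug entry point for a full TX FIFO flush: stop the mac80211 queues,
 * ask the uCode to drop all frames, wait for the transport TX queues to
 * drain, then wake the queues again.
 */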
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	mutex_lock(&priv->shrd->mutex);
	ieee80211_stop_queues(priv->hw);
	if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
		IWL_ERR(priv, "flush request fail\n");
		goto done;
	}
	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
	iwl_trans_wait_tx_queue_empty(trans(priv));
done:
	ieee80211_wake_queues(priv->hw);
	mutex_unlock(&priv->shrd->mutex);
}

/*
 * BT coex
 */
/*
 * Macros to access the lookup table.
 *
 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
 *
 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
 *
 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
 * one after another in 32-bit registers, and "registers" 0 through 7 contain
 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
 *
 * These macros encode that format.
 */
#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
		  wifi_txrx, wifi_sh_ant_req) \
	(bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
	(wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))
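/*
 * Example: LUT_VALUE(1, 0, 1, 0, 0, 0, 0) packs bt3_prio into bit 0 and
 * bt_rf_act into bit 2, giving index 0x05.  That index selects WLAN_ACTIVE
 * bit 5 of lut[8], and WLAN_KILL / ANT_SWITCH bits 10 and 11 of lut[0] in
 * the accessor macros below.
 */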

#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
	lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
			bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			wifi_sh_ant_req))))
#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
			bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			wifi_sh_ant_req))
#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
				  wifi_req, wifi_prio, wifi_txrx, \
				  wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
			bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			wifi_sh_ant_req))

#define LUT_WLAN_KILL_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			  wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))

#define LUT_ANT_SWITCH_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			wifi_req, wifi_prio, wifi_txrx, \
			wifi_sh_ant_req))))
#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			     wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))

static const __le32 iwlagn_def_3w_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xc0004000),
	cpu_to_le32(0x00004000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};

static const __le32 iwlagn_concurrent_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
};

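/*
 * Build the "basic" BT coexistence configuration (kill masks, lookup table,
 * coex mode flags), wrap it in the 2000- or 6000-series command layout as
 * appropriate, and send REPLY_BT_CONFIG to the uCode.
 */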
void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwl_basic_bt_cmd basic = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};
	struct iwl6000_bt_cmd bt_cmd_6000;
	struct iwl2000_bt_cmd bt_cmd_2000;
	int ret;

	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
			sizeof(basic.bt3_lookup_table));

	if (cfg(priv)->bt_params) {
		if (cfg(priv)->bt_params->bt_session_2) {
			bt_cmd_2000.prio_boost = cpu_to_le32(
				cfg(priv)->bt_params->bt_prio_boost);
			bt_cmd_2000.tx_prio_boost = 0;
			bt_cmd_2000.rx_prio_boost = 0;
		} else {
			bt_cmd_6000.prio_boost =
				cfg(priv)->bt_params->bt_prio_boost;
			bt_cmd_6000.tx_prio_boost = 0;
			bt_cmd_6000.rx_prio_boost = 0;
		}
	} else {
		IWL_ERR(priv, "failed to construct BT Coex Config\n");
		return;
	}

	basic.kill_ack_mask = priv->kill_ack_mask;
	basic.kill_cts_mask = priv->kill_cts_mask;
	basic.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!iwlagn_mod_params.bt_coex_active ||
	    priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
	} else {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;

		if (!priv->bt_enable_pspoll)
			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
		else
			basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;

		if (priv->bt_ch_announce)
			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
	}
	priv->bt_enable_flag = basic.flags;
	if (priv->bt_full_concurrent)
		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
			sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
			sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
		       basic.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	if (cfg(priv)->bt_params->bt_session_2) {
		memcpy(&bt_cmd_2000.basic, &basic,
			sizeof(basic));
		ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
			CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
	} else {
		memcpy(&bt_cmd_6000.basic, &basic,
			sizeof(basic));
		ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
			CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
	}
	if (ret)
		IWL_ERR(priv, "failed to send BT Coex Config\n");

}

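/*
 * Enable RSSI reporting on the associated station interface, or disable it
 * when @rssi_ena is false or an associated AP/GO interface is present.
 * Caller must hold priv->shrd->mutex.
 */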
void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
{
	struct iwl_rxon_context *ctx, *found_ctx = NULL;
	bool found_ap = false;

	lockdep_assert_held(&priv->shrd->mutex);

	/* Check whether AP or GO mode is active. */
	if (rssi_ena) {
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
			    iwl_is_associated_ctx(ctx)) {
				found_ap = true;
				break;
			}
		}
	}

	/*
	 * If disable was received or if GO/AP mode is active, disable RSSI
	 * measurements.
	 */
	if (!rssi_ena || found_ap) {
		if (priv->cur_rssi_ctx) {
			ctx = priv->cur_rssi_ctx;
			ieee80211_disable_rssi_reports(ctx->vif);
			priv->cur_rssi_ctx = NULL;
		}
		return;
	}

	/*
	 * If rssi measurements need to be enabled, consider all cases now.
	 * Figure out how many contexts are active.
	 */
	for_each_context(priv, ctx) {
		if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
		    iwl_is_associated_ctx(ctx)) {
			found_ctx = ctx;
			break;
		}
	}

	/*
	 * rssi monitor already enabled for the correct interface...nothing
	 * to do.
	 */
	if (found_ctx == priv->cur_rssi_ctx)
		return;

	/*
	 * Figure out if rssi monitor is currently enabled, and needs
	 * to be changed. If rssi monitor is already enabled, disable
	 * it first else just enable rssi measurements on the
	 * interface found above.
	 */
	if (priv->cur_rssi_ctx) {
		ctx = priv->cur_rssi_ctx;
		if (ctx->vif)
			ieee80211_disable_rssi_reports(ctx->vif);
	}

	priv->cur_rssi_ctx = found_ctx;

	if (!found_ctx)
		return;

	ieee80211_enable_rssi_reports(found_ctx->vif,
			IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
			IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
}

static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
{
	return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >>
			BT_UART_MSG_FRAME3SCOESCO_POS;
}

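/*
 * Deferred work run when the reported BT traffic load changes: pick an SMPS
 * setting that matches the load, update the RX chain configuration and the
 * RSSI monitor, unless a HW scan is in progress.
 */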
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	/*
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that since the only bad
	 * consequence is a debug print that does not match the actual state.
	 */
	IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		if (priv->bt_status)
			smps_request = IEEE80211_SMPS_DYNAMIC;
		else
			smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->shrd->mutex);

	/*
	 * We cannot send a command to the firmware while scanning. When the
	 * scan completes we will schedule this work again. We check with the
	 * mutex held to prevent a new scan request from arriving. We do not
	 * check STATUS_SCANNING to avoid a race when queue_work() is called
	 * twice from different notifications but then quits without doing
	 * any work at all.
	 */
	if (test_bit(STATUS_SCAN_HW, &priv->shrd->status))
		goto out;

	iwl_update_chain_flags(priv);

	if (smps_request != -1) {
		priv->current_ht_config.smps = smps_request;
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}

	/*
	 * Dynamic PS poll related functionality. Adjust RSSI measurements if
	 * necessary.
	 */
	iwlagn_bt_coex_rssi_monitor(priv);
out:
	mutex_unlock(&priv->shrd->mutex);
}

/*
 * If BT sco traffic, and RSSI monitor is enabled, move measurements to the
 * correct interface or disable it if this is the last interface to be
 * removed.
 */
void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
{
	if (priv->bt_is_sco &&
	    priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
		iwlagn_bt_adjust_rssi_monitor(priv, true);
	else
		iwlagn_bt_adjust_rssi_monitor(priv, false);
}

static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
			"0x%X, Inquiry = 0x%X, Connectable = 0x%X",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7PAGE_POS,
		(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRY_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}

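/*
 * Choose the ACK/CTS kill masks based on whether the BT UART message reports
 * SCO/eSCO traffic; when the masks change, schedule bt_runtime_config so the
 * updated configuration is sent to the uCode.
 */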
static void iwlagn_set_kill_msk(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	u8 kill_msk;
	static const __le32 bt_kill_ack_msg[2] = {
		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
	static const __le32 bt_kill_cts_msg[2] = {
		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };

	kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
		? 1 : 0;
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];

		/* schedule to send runtime bt_config */
		queue_work(priv->workqueue, &priv->bt_runtime_config);
	}
}

int iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
				 struct iwl_rx_mem_buffer *rxb,
				 struct iwl_device_cmd *cmd)
{
	unsigned long flags;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return 0;
	}

	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
	IWL_DEBUG_COEX(priv, "    status: %d\n", coex->bt_status);
	IWL_DEBUG_COEX(priv, "    traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_COEX(priv, "    CI compliance: %d\n",
			coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	priv->last_bt_traffic_load = priv->bt_traffic_load;
	priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);

	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	iwlagn_set_kill_msk(priv, uart_msg);

	/* FIXME: based on notification, adjust the prio_boost */

	spin_lock_irqsave(&priv->shrd->lock, flags);
	priv->bt_ci_compliance = coex->bt_ci_compliance;
	spin_unlock_irqrestore(&priv->shrd->lock, flags);
	return 0;
}

void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}

void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}

void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}

static bool is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}

#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1

/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity.  Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	if (cfg(priv)->bt_params &&
	    cfg(priv)->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		return IWL_NUM_RX_CHAINS_SINGLE;
	}
	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}

/*
 * When we are in power saving mode, unless device support spatial
 * multiplexing power save, use the active count for rx chain count.
 */
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
		     priv->current_ht_config.smps);
		return active_cnt;
	}
}

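/*
 * Count how many antenna chains are flagged in a 4-bit chain bitmap,
 * e.g. a bitmap of 0x5 (chains A and C) yields 2.
 */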
/* up to 4 chains */
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
	u8 res;
	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}

/**
 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 *    checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = hw_params(priv).valid_rx_ant;

	if (cfg(priv)->bt_params &&
	    cfg(priv)->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		active_chains = first_antenna(active_chains);
	}

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);


	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}

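/*
 * Round-robin to the next valid TX antenna after @ant; on 2.4 GHz with high
 * BT traffic load the first antenna is forced instead.
 */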
u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
	int i;
	u8 ind = ant;

	if (priv->band == IEEE80211_BAND_2GHZ &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		return 0;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (valid & BIT(ind))
			return ind;
	}
	return ant;
}

#ifdef CONFIG_PM_SLEEP
static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
	int i;

	for (i = 0; i < IWLAGN_P1K_SIZE; i++)
		out[i] = cpu_to_le16(p1k[i]);
}

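/* State shared with the key iterator while programming keys for WoWLAN */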
struct wowlan_key_data {
	struct iwl_rxon_context *ctx;
	struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
	struct iwlagn_wowlan_tkip_params_cmd *tkip;
	const u8 *bssid;
	bool error, use_rsc_tsc, use_tkip;
};

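/*
 * ieee80211_iter_keys() callback used on suspend: re-program each key into
 * the WoWLAN uCode and capture the TKIP/CCMP sequence counters and TKIP
 * phase-1 keys the uCode needs while the host is asleep.
 */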
static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct ieee80211_key_conf *key,
				       void *_data)
{
	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
	struct wowlan_key_data *data = _data;
	struct iwl_rxon_context *ctx = data->ctx;
	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
	struct iwlagn_p1k_cache *rx_p1ks;
	u8 *rx_mic_key;
	struct ieee80211_key_seq seq;
	u32 cur_rx_iv32 = 0;
	u16 p1k[IWLAGN_P1K_SIZE];
	int ret, i;

	mutex_lock(&priv->shrd->mutex);

	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	     !sta && !ctx->key_mapping_keys)
		ret = iwl_set_default_wep_key(priv, ctx, key);
	else
		ret = iwl_set_dynamic_key(priv, ctx, key, sta);

	if (ret) {
		IWL_ERR(priv, "Error setting key during suspend!\n");
		data->error = true;
	}

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		if (sta) {
			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;

			rx_p1ks = data->tkip->rx_uni;

			ieee80211_get_key_tx_seq(key, &seq);
			tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);

			ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
			iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);

			memcpy(data->tkip->mic_keys.tx,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWLAGN_MIC_KEY_SIZE);

			rx_mic_key = data->tkip->mic_keys.rx_unicast;
		} else {
			tkip_sc =
				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
			rx_p1ks = data->tkip->rx_multi;
			rx_mic_key = data->tkip->mic_keys.rx_mcast;
		}

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 (as they need to to avoid replay attacks)
		 * for checking the IV in the frames.
		 */
		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
			ieee80211_get_key_rx_seq(key, i, &seq);
			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
			/* wrapping isn't allowed, AP must rekey */
			if (seq.tkip.iv32 > cur_rx_iv32)
				cur_rx_iv32 = seq.tkip.iv32;
		}

		ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
		iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
		ieee80211_get_tkip_rx_p1k(key, data->bssid,
					  cur_rx_iv32 + 1, p1k);
		iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);

		memcpy(rx_mic_key,
		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       IWLAGN_MIC_KEY_SIZE);

		data->use_tkip = true;
		data->use_rsc_tsc = true;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		if (sta) {
			u8 *pn = seq.ccmp.pn;

			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;

			ieee80211_get_key_tx_seq(key, &seq);
			aes_tx_sc->pn = cpu_to_le64(
					(u64)pn[5] |
					((u64)pn[4] << 8) |
					((u64)pn[3] << 16) |
					((u64)pn[2] << 24) |
					((u64)pn[1] << 32) |
					((u64)pn[0] << 40));
		} else
			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;

		/*
		 * For non-QoS this relies on the fact that both the uCode and
		 * mac80211 use TID 0 for checking the IV in the frames.
		 */
		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
			u8 *pn = seq.ccmp.pn;

			ieee80211_get_key_rx_seq(key, i, &seq);
			aes_sc->pn = cpu_to_le64(
					(u64)pn[5] |
					((u64)pn[4] << 8) |
					((u64)pn[3] << 16) |
					((u64)pn[2] << 24) |
					((u64)pn[1] << 32) |
					((u64)pn[0] << 40));
		}
		data->use_rsc_tsc = true;
		break;
	}

	mutex_unlock(&priv->shrd->mutex);
}

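/*
 * Upload the configured wake-on-WLAN packet patterns with a single
 * REPLY_WOWLAN_PATTERNS command; returns 0 immediately when no patterns
 * are set.
 */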
int iwlagn_send_patterns(struct iwl_priv *priv,
			struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_WOWLAN_PATTERNS,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
		.flags = CMD_SYNC,
	};
	int i, err;

	if (!wowlan->n_patterns)
		return 0;

	cmd.len[0] = sizeof(*pattern_cmd) +
		wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);

	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
	if (!pattern_cmd)
		return -ENOMEM;

	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);

	for (i = 0; i < wowlan->n_patterns; i++) {
		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);

		memcpy(&pattern_cmd->patterns[i].mask,
			wowlan->patterns[i].mask, mask_len);
		memcpy(&pattern_cmd->patterns[i].pattern,
			wowlan->patterns[i].pattern,
			wowlan->patterns[i].pattern_len);
		pattern_cmd->patterns[i].mask_size = mask_len;
		pattern_cmd->patterns[i].pattern_size =
			wowlan->patterns[i].pattern_len;
	}

	cmd.data[0] = pattern_cmd;
	err = iwl_trans_send_cmd(trans(priv), &cmd);
	kfree(pattern_cmd);
	return err;
}

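/*
 * Suspend path: build the wakeup filter from the cfg80211 WoWLAN triggers,
 * restart the device with the WoWLAN uCode, restore the RXON and keys, and
 * send the D3 configuration, wakeup filter and pattern commands.
 */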
int iwlagn_suspend(struct iwl_priv *priv,
		   struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
	struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
	struct iwl_rxon_cmd rxon;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
	struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
	struct iwlagn_d3_config_cmd d3_cfg_cmd = {};
	struct wowlan_key_data key_data = {
		.ctx = ctx,
		.bssid = ctx->active.bssid_addr,
		.use_rsc_tsc = false,
		.tkip = &tkip_cmd,
		.use_tkip = false,
	};
	int ret, i;
	u16 seq;

	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
	if (!key_data.rsc_tsc)
		return -ENOMEM;

	memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));

	/*
	 * We know the last used seqno, and the uCode expects to know that
	 * one, it will increment before TX.
	 */
	seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
	wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);

	/*
	 * For QoS counters, we store the one to use next, so subtract 0x10
	 * since the uCode will add 0x10 before using the value.
	 */
	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
		seq = priv->tid_data[IWL_AP_ID][i].seq_number;
		seq -= 0x10;
		wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
	}

	if (wowlan->disconnect)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
				    IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
	if (wowlan->magic_pkt)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
	if (wowlan->gtk_rekey_failure)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
	if (wowlan->eap_identity_req)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
	if (wowlan->four_way_handshake)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
	if (wowlan->n_patterns)
		wakeup_filter_cmd.enabled |=
			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);

	if (wowlan->rfkill_release)
		d3_cfg_cmd.wakeup_flags |=
			cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);

	iwl_scan_cancel_timeout(priv, 200);

	memcpy(&rxon, &ctx->active, sizeof(rxon));

	iwl_trans_stop_device(trans(priv));

	priv->shrd->wowlan = true;

	ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN);
	if (ret)
		goto out;

	/* now configure WoWLAN ucode */
	ret = iwl_alive_start(priv);
	if (ret)
		goto out;

	memcpy(&ctx->staging, &rxon, sizeof(rxon));
	ret = iwlagn_commit_rxon(priv, ctx);
	if (ret)
		goto out;

	ret = iwl_power_update_mode(priv, true);
	if (ret)
		goto out;

	if (!iwlagn_mod_params.sw_crypto) {
		/* mark all keys clear */
		priv->ucode_key_table = 0;
		ctx->key_mapping_keys = 0;

		/*
		 * This needs to be unlocked due to lock ordering
		 * constraints. Since we're in the suspend path
		 * that isn't really a problem though.
		 */
		mutex_unlock(&priv->shrd->mutex);
		ieee80211_iter_keys(priv->hw, ctx->vif,
				    iwlagn_wowlan_program_keys,
				    &key_data);
		mutex_lock(&priv->shrd->mutex);
		if (key_data.error) {
			ret = -EIO;
			goto out;
		}

		if (key_data.use_rsc_tsc) {
			struct iwl_host_cmd rsc_tsc_cmd = {
				.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
				.flags = CMD_SYNC,
				.data[0] = key_data.rsc_tsc,
				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
				.len[0] = sizeof(key_data.rsc_tsc),
			};

			ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
			if (ret)
				goto out;
		}

		if (key_data.use_tkip) {
			ret = iwl_trans_send_cmd_pdu(trans(priv),
						     REPLY_WOWLAN_TKIP_PARAMS,
						     CMD_SYNC, sizeof(tkip_cmd),
						     &tkip_cmd);
			if (ret)
				goto out;
		}

		if (priv->have_rekey_data) {
			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
			memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
			memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
			kek_kck_cmd.replay_ctr = priv->replay_ctr;

			ret = iwl_trans_send_cmd_pdu(trans(priv),
						REPLY_WOWLAN_KEK_KCK_MATERIAL,
						CMD_SYNC, sizeof(kek_kck_cmd),
						&kek_kck_cmd);
			if (ret)
				goto out;
		}
	}

	ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
				     sizeof(d3_cfg_cmd), &d3_cfg_cmd);
	if (ret)
		goto out;

	ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
				     CMD_SYNC, sizeof(wakeup_filter_cmd),
				     &wakeup_filter_cmd);
	if (ret)
		goto out;

	ret = iwlagn_send_patterns(priv, wowlan);
out:
	kfree(key_data.rsc_tsc);
	return ret;
}

#endif