mirror of https://gitee.com/openkylin/linux.git
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless into for-davem
commit 32cdd592b7
@@ -151,7 +151,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
|
||||||
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
|
sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
|
||||||
|
|
||||||
if (!(flags & CMD_ASYNC)) {
|
if (!(flags & CMD_ASYNC)) {
|
||||||
cmd.flags |= CMD_WANT_SKB | CMD_WANT_HCMD;
|
cmd.flags |= CMD_WANT_SKB;
|
||||||
might_sleep();
|
might_sleep();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -363,7 +363,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
|
||||||
__entry->flags = cmd->flags;
|
__entry->flags = cmd->flags;
|
||||||
memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
|
memcpy(__get_dynamic_array(hcmd), hdr, sizeof(*hdr));
|
||||||
|
|
||||||
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
|
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
|
||||||
if (!cmd->len[i])
|
if (!cmd->len[i])
|
||||||
continue;
|
continue;
|
||||||
memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
|
memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
|
||||||
|
|
|
@@ -1102,7 +1102,6 @@ void iwl_drv_stop(struct iwl_drv *drv)
|
||||||
|
|
||||||
/* shared module parameters */
|
/* shared module parameters */
|
||||||
struct iwl_mod_params iwlwifi_mod_params = {
|
struct iwl_mod_params iwlwifi_mod_params = {
|
||||||
.amsdu_size_8K = 1,
|
|
||||||
.restart_fw = 1,
|
.restart_fw = 1,
|
||||||
.plcp_check = true,
|
.plcp_check = true,
|
||||||
.bt_coex_active = true,
|
.bt_coex_active = true,
|
||||||
|
@@ -1207,7 +1206,7 @@ MODULE_PARM_DESC(11n_disable,
|
||||||
"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
|
"disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
|
||||||
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
|
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
|
||||||
int, S_IRUGO);
|
int, S_IRUGO);
|
||||||
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
|
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
|
||||||
module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
|
module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
|
||||||
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
|
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
|
||||||
|
|
||||||
|
|
|
@@ -91,7 +91,7 @@ enum iwl_power_level {
|
||||||
* @sw_crypto: using hardware encryption, default = 0
|
* @sw_crypto: using hardware encryption, default = 0
|
||||||
* @disable_11n: disable 11n capabilities, default = 0,
|
* @disable_11n: disable 11n capabilities, default = 0,
|
||||||
* use IWL_DISABLE_HT_* constants
|
* use IWL_DISABLE_HT_* constants
|
||||||
* @amsdu_size_8K: enable 8K amsdu size, default = 1
|
* @amsdu_size_8K: enable 8K amsdu size, default = 0
|
||||||
* @restart_fw: restart firmware, default = 1
|
* @restart_fw: restart firmware, default = 1
|
||||||
* @plcp_check: enable plcp health check, default = true
|
* @plcp_check: enable plcp health check, default = true
|
||||||
* @wd_disable: enable stuck queue check, default = 0
|
* @wd_disable: enable stuck queue check, default = 0
|
||||||
|
|
|
@@ -186,19 +186,13 @@ struct iwl_rx_packet {
|
||||||
* @CMD_ASYNC: Return right away and don't want for the response
|
* @CMD_ASYNC: Return right away and don't want for the response
|
||||||
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
|
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
|
||||||
* response. The caller needs to call iwl_free_resp when done.
|
* response. The caller needs to call iwl_free_resp when done.
|
||||||
* @CMD_WANT_HCMD: The caller needs to get the HCMD that was sent in the
|
|
||||||
* response handler. Chunks flagged by %IWL_HCMD_DFL_NOCOPY won't be
|
|
||||||
* copied. The pointer passed to the response handler is in the transport
|
|
||||||
* ownership and don't need to be freed by the op_mode. This also means
|
|
||||||
* that the pointer is invalidated after the op_mode's handler returns.
|
|
||||||
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
|
* @CMD_ON_DEMAND: This command is sent by the test mode pipe.
|
||||||
*/
|
*/
|
||||||
enum CMD_MODE {
|
enum CMD_MODE {
|
||||||
CMD_SYNC = 0,
|
CMD_SYNC = 0,
|
||||||
CMD_ASYNC = BIT(0),
|
CMD_ASYNC = BIT(0),
|
||||||
CMD_WANT_SKB = BIT(1),
|
CMD_WANT_SKB = BIT(1),
|
||||||
CMD_WANT_HCMD = BIT(2),
|
CMD_ON_DEMAND = BIT(2),
|
||||||
CMD_ON_DEMAND = BIT(3),
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#define DEF_CMD_PAYLOAD_SIZE 320
|
#define DEF_CMD_PAYLOAD_SIZE 320
|
||||||
|
@@ -217,7 +211,11 @@ struct iwl_device_cmd {
|
||||||
|
|
||||||
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
|
#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
|
||||||
|
|
||||||
#define IWL_MAX_CMD_TFDS 2
|
/*
|
||||||
|
* number of transfer buffers (fragments) per transmit frame descriptor;
|
||||||
|
* this is just the driver's idea, the hardware supports 20
|
||||||
|
*/
|
||||||
|
#define IWL_MAX_CMD_TBS_PER_TFD 2
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
|
* struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
|
||||||
|
@@ -254,15 +252,15 @@ enum iwl_hcmd_dataflag {
|
||||||
* @id: id of the host command
|
* @id: id of the host command
|
||||||
*/
|
*/
|
||||||
struct iwl_host_cmd {
|
struct iwl_host_cmd {
|
||||||
const void *data[IWL_MAX_CMD_TFDS];
|
const void *data[IWL_MAX_CMD_TBS_PER_TFD];
|
||||||
struct iwl_rx_packet *resp_pkt;
|
struct iwl_rx_packet *resp_pkt;
|
||||||
unsigned long _rx_page_addr;
|
unsigned long _rx_page_addr;
|
||||||
u32 _rx_page_order;
|
u32 _rx_page_order;
|
||||||
int handler_status;
|
int handler_status;
|
||||||
|
|
||||||
u32 flags;
|
u32 flags;
|
||||||
u16 len[IWL_MAX_CMD_TFDS];
|
u16 len[IWL_MAX_CMD_TBS_PER_TFD];
|
||||||
u8 dataflags[IWL_MAX_CMD_TFDS];
|
u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
|
||||||
u8 id;
|
u8 id;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@@ -762,18 +762,20 @@ struct iwl_phy_context_cmd {
|
||||||
#define IWL_RX_INFO_PHY_CNT 8
|
#define IWL_RX_INFO_PHY_CNT 8
|
||||||
#define IWL_RX_INFO_AGC_IDX 1
|
#define IWL_RX_INFO_AGC_IDX 1
|
||||||
#define IWL_RX_INFO_RSSI_AB_IDX 2
|
#define IWL_RX_INFO_RSSI_AB_IDX 2
|
||||||
#define IWL_RX_INFO_RSSI_C_IDX 3
|
#define IWL_OFDM_AGC_A_MSK 0x0000007f
|
||||||
#define IWL_OFDM_AGC_DB_MSK 0xfe00
|
#define IWL_OFDM_AGC_A_POS 0
|
||||||
#define IWL_OFDM_AGC_DB_POS 9
|
#define IWL_OFDM_AGC_B_MSK 0x00003f80
|
||||||
|
#define IWL_OFDM_AGC_B_POS 7
|
||||||
|
#define IWL_OFDM_AGC_CODE_MSK 0x3fe00000
|
||||||
|
#define IWL_OFDM_AGC_CODE_POS 20
|
||||||
#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
|
#define IWL_OFDM_RSSI_INBAND_A_MSK 0x00ff
|
||||||
#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
|
|
||||||
#define IWL_OFDM_RSSI_A_POS 0
|
#define IWL_OFDM_RSSI_A_POS 0
|
||||||
|
#define IWL_OFDM_RSSI_ALLBAND_A_MSK 0xff00
|
||||||
|
#define IWL_OFDM_RSSI_ALLBAND_A_POS 8
|
||||||
#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
|
#define IWL_OFDM_RSSI_INBAND_B_MSK 0xff0000
|
||||||
#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
|
|
||||||
#define IWL_OFDM_RSSI_B_POS 16
|
#define IWL_OFDM_RSSI_B_POS 16
|
||||||
#define IWL_OFDM_RSSI_INBAND_C_MSK 0x00ff
|
#define IWL_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
|
||||||
#define IWL_OFDM_RSSI_ALLBAND_C_MSK 0xff00
|
#define IWL_OFDM_RSSI_ALLBAND_B_POS 24
|
||||||
#define IWL_OFDM_RSSI_C_POS 0
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct iwl_rx_phy_info - phy info
|
* struct iwl_rx_phy_info - phy info
|
||||||
|
|
|
@@ -79,17 +79,8 @@
|
||||||
#define UCODE_VALID_OK cpu_to_le32(0x1)
|
#define UCODE_VALID_OK cpu_to_le32(0x1)
|
||||||
|
|
||||||
/* Default calibration values for WkP - set to INIT image w/o running */
|
/* Default calibration values for WkP - set to INIT image w/o running */
|
||||||
static const u8 wkp_calib_values_bb_filter[] = { 0xbf, 0x00, 0x5f, 0x00, 0x2f,
|
|
||||||
0x00, 0x18, 0x00 };
|
|
||||||
static const u8 wkp_calib_values_rx_dc[] = { 0x7f, 0x7f, 0x7f, 0x7f, 0x7f,
|
|
||||||
0x7f, 0x7f, 0x7f };
|
|
||||||
static const u8 wkp_calib_values_tx_lo[] = { 0x00, 0x00, 0x00, 0x00 };
|
|
||||||
static const u8 wkp_calib_values_tx_iq[] = { 0xff, 0x00, 0xff, 0x00, 0x00,
|
|
||||||
0x00 };
|
|
||||||
static const u8 wkp_calib_values_rx_iq[] = { 0xff, 0x00, 0x00, 0x00 };
|
|
||||||
static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
|
static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
|
||||||
static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
|
static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
|
||||||
static const u8 wkp_calib_values_xtal[] = { 0xd2, 0xd2 };
|
|
||||||
|
|
||||||
struct iwl_calib_default_data {
|
struct iwl_calib_default_data {
|
||||||
u16 size;
|
u16 size;
|
||||||
|
@@ -99,12 +90,7 @@ struct iwl_calib_default_data {
|
||||||
#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
|
#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
|
||||||
|
|
||||||
static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
|
static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
|
||||||
[5] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_dc),
|
|
||||||
[6] = CALIB_SIZE_N_DATA(wkp_calib_values_bb_filter),
|
|
||||||
[7] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_lo),
|
|
||||||
[8] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq),
|
|
||||||
[9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
|
[9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
|
||||||
[10] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq),
|
|
||||||
[11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
|
[11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -241,20 +227,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
#define IWL_HW_REV_ID_RAINBOW 0x2
|
|
||||||
#define IWL_PROJ_TYPE_LHP 0x5
|
|
||||||
|
|
||||||
static u32 iwl_mvm_build_phy_cfg(struct iwl_mvm *mvm)
|
|
||||||
{
|
|
||||||
struct iwl_nvm_data *data = mvm->nvm_data;
|
|
||||||
/* Temp calls to static definitions, will be changed to CSR calls */
|
|
||||||
u8 hw_rev_id = IWL_HW_REV_ID_RAINBOW;
|
|
||||||
u8 project_type = IWL_PROJ_TYPE_LHP;
|
|
||||||
|
|
||||||
return data->radio_cfg_dash | (data->radio_cfg_step << 2) |
|
|
||||||
(hw_rev_id << 4) | ((project_type & 0x7f) << 6) |
|
|
||||||
(data->valid_tx_ant << 16) | (data->valid_rx_ant << 20);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
|
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
|
||||||
{
|
{
|
||||||
|
@@ -262,7 +234,7 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
|
||||||
enum iwl_ucode_type ucode_type = mvm->cur_ucode;
|
enum iwl_ucode_type ucode_type = mvm->cur_ucode;
|
||||||
|
|
||||||
/* Set parameters */
|
/* Set parameters */
|
||||||
phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_build_phy_cfg(mvm));
|
phy_cfg_cmd.phy_cfg = cpu_to_le32(mvm->fw->phy_config);
|
||||||
phy_cfg_cmd.calib_control.event_trigger =
|
phy_cfg_cmd.calib_control.event_trigger =
|
||||||
mvm->fw->default_calib[ucode_type].event_trigger;
|
mvm->fw->default_calib[ucode_type].event_trigger;
|
||||||
phy_cfg_cmd.calib_control.flow_trigger =
|
phy_cfg_cmd.calib_control.flow_trigger =
|
||||||
|
@@ -275,103 +247,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
|
||||||
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
|
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Starting with the new PHY DB implementation - New calibs are enabled */
|
|
||||||
/* Value - 0x405e7 */
|
|
||||||
#define IWL_CALIB_DEFAULT_FLOW_INIT (IWL_CALIB_CFG_XTAL_IDX |\
|
|
||||||
IWL_CALIB_CFG_TEMPERATURE_IDX |\
|
|
||||||
IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
|
|
||||||
IWL_CALIB_CFG_DC_IDX |\
|
|
||||||
IWL_CALIB_CFG_BB_FILTER_IDX |\
|
|
||||||
IWL_CALIB_CFG_LO_LEAKAGE_IDX |\
|
|
||||||
IWL_CALIB_CFG_TX_IQ_IDX |\
|
|
||||||
IWL_CALIB_CFG_RX_IQ_IDX |\
|
|
||||||
IWL_CALIB_CFG_AGC_IDX)
|
|
||||||
|
|
||||||
#define IWL_CALIB_DEFAULT_EVENT_INIT 0x0
|
|
||||||
|
|
||||||
/* Value 0x41567 */
|
|
||||||
#define IWL_CALIB_DEFAULT_FLOW_RUN (IWL_CALIB_CFG_XTAL_IDX |\
|
|
||||||
IWL_CALIB_CFG_TEMPERATURE_IDX |\
|
|
||||||
IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
|
|
||||||
IWL_CALIB_CFG_BB_FILTER_IDX |\
|
|
||||||
IWL_CALIB_CFG_DC_IDX |\
|
|
||||||
IWL_CALIB_CFG_TX_IQ_IDX |\
|
|
||||||
IWL_CALIB_CFG_RX_IQ_IDX |\
|
|
||||||
IWL_CALIB_CFG_SENSITIVITY_IDX |\
|
|
||||||
IWL_CALIB_CFG_AGC_IDX)
|
|
||||||
|
|
||||||
#define IWL_CALIB_DEFAULT_EVENT_RUN (IWL_CALIB_CFG_XTAL_IDX |\
|
|
||||||
IWL_CALIB_CFG_TEMPERATURE_IDX |\
|
|
||||||
IWL_CALIB_CFG_VOLTAGE_READ_IDX |\
|
|
||||||
IWL_CALIB_CFG_TX_PWR_IDX |\
|
|
||||||
IWL_CALIB_CFG_DC_IDX |\
|
|
||||||
IWL_CALIB_CFG_TX_IQ_IDX |\
|
|
||||||
IWL_CALIB_CFG_SENSITIVITY_IDX)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Sets the calibrations trigger values that will be sent to the FW for runtime
|
|
||||||
* and init calibrations.
|
|
||||||
* The ones given in the FW TLV are not correct.
|
|
||||||
*/
|
|
||||||
static void iwl_set_default_calib_trigger(struct iwl_mvm *mvm)
|
|
||||||
{
|
|
||||||
struct iwl_tlv_calib_ctrl default_calib;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* WkP FW TLV calib bits are wrong, overwrite them.
|
|
||||||
* This defines the dynamic calibrations which are implemented in the
|
|
||||||
* uCode both for init(flow) calculation and event driven calibs.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* Init Image */
|
|
||||||
default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_INIT);
|
|
||||||
default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_INIT);
|
|
||||||
|
|
||||||
if (default_calib.event_trigger !=
|
|
||||||
mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger)
|
|
||||||
IWL_ERR(mvm,
|
|
||||||
"Updating the event calib for INIT image: 0x%x -> 0x%x\n",
|
|
||||||
mvm->fw->default_calib[IWL_UCODE_INIT].event_trigger,
|
|
||||||
default_calib.event_trigger);
|
|
||||||
if (default_calib.flow_trigger !=
|
|
||||||
mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger)
|
|
||||||
IWL_ERR(mvm,
|
|
||||||
"Updating the flow calib for INIT image: 0x%x -> 0x%x\n",
|
|
||||||
mvm->fw->default_calib[IWL_UCODE_INIT].flow_trigger,
|
|
||||||
default_calib.flow_trigger);
|
|
||||||
|
|
||||||
memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_INIT],
|
|
||||||
&default_calib, sizeof(struct iwl_tlv_calib_ctrl));
|
|
||||||
IWL_ERR(mvm,
|
|
||||||
"Setting uCode init calibrations event 0x%x, trigger 0x%x\n",
|
|
||||||
default_calib.event_trigger,
|
|
||||||
default_calib.flow_trigger);
|
|
||||||
|
|
||||||
/* Run time image */
|
|
||||||
default_calib.event_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_EVENT_RUN);
|
|
||||||
default_calib.flow_trigger = cpu_to_le32(IWL_CALIB_DEFAULT_FLOW_RUN);
|
|
||||||
|
|
||||||
if (default_calib.event_trigger !=
|
|
||||||
mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger)
|
|
||||||
IWL_ERR(mvm,
|
|
||||||
"Updating the event calib for RT image: 0x%x -> 0x%x\n",
|
|
||||||
mvm->fw->default_calib[IWL_UCODE_REGULAR].event_trigger,
|
|
||||||
default_calib.event_trigger);
|
|
||||||
if (default_calib.flow_trigger !=
|
|
||||||
mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger)
|
|
||||||
IWL_ERR(mvm,
|
|
||||||
"Updating the flow calib for RT image: 0x%x -> 0x%x\n",
|
|
||||||
mvm->fw->default_calib[IWL_UCODE_REGULAR].flow_trigger,
|
|
||||||
default_calib.flow_trigger);
|
|
||||||
|
|
||||||
memcpy((void *)&mvm->fw->default_calib[IWL_UCODE_REGULAR],
|
|
||||||
&default_calib, sizeof(struct iwl_tlv_calib_ctrl));
|
|
||||||
IWL_ERR(mvm,
|
|
||||||
"Setting uCode runtime calibs event 0x%x, trigger 0x%x\n",
|
|
||||||
default_calib.event_trigger,
|
|
||||||
default_calib.flow_trigger);
|
|
||||||
}
|
|
||||||
|
|
||||||
static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
|
static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
|
||||||
{
|
{
|
||||||
u8 cmd_raw[16]; /* holds the variable size commands */
|
u8 cmd_raw[16]; /* holds the variable size commands */
|
||||||
|
@@ -446,8 +321,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
|
||||||
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
|
ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
|
||||||
WARN_ON(ret);
|
WARN_ON(ret);
|
||||||
|
|
||||||
/* Override the calibrations from TLV and the const of fw */
|
/* Send TX valid antennas before triggering calibrations */
|
||||||
iwl_set_default_calib_trigger(mvm);
|
ret = iwl_send_tx_ant_cfg(mvm, mvm->nvm_data->valid_tx_ant);
|
||||||
|
if (ret)
|
||||||
|
goto error;
|
||||||
|
|
||||||
/* WkP doesn't have all calibrations, need to set default values */
|
/* WkP doesn't have all calibrations, need to set default values */
|
||||||
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
|
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
|
||||||
|
|
|
@@ -80,7 +80,8 @@
|
||||||
|
|
||||||
#define IWL_INVALID_MAC80211_QUEUE 0xff
|
#define IWL_INVALID_MAC80211_QUEUE 0xff
|
||||||
#define IWL_MVM_MAX_ADDRESSES 2
|
#define IWL_MVM_MAX_ADDRESSES 2
|
||||||
#define IWL_RSSI_OFFSET 44
|
/* RSSI offset for WkP */
|
||||||
|
#define IWL_RSSI_OFFSET 50
|
||||||
|
|
||||||
enum iwl_mvm_tx_fifo {
|
enum iwl_mvm_tx_fifo {
|
||||||
IWL_MVM_TX_FIFO_BK = 0,
|
IWL_MVM_TX_FIFO_BK = 0,
|
||||||
|
|
|
@@ -624,12 +624,8 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
|
||||||
ieee80211_free_txskb(mvm->hw, skb);
|
ieee80211_free_txskb(mvm->hw, skb);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
|
static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
|
||||||
{
|
{
|
||||||
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
|
|
||||||
|
|
||||||
iwl_mvm_dump_nic_error_log(mvm);
|
|
||||||
|
|
||||||
iwl_abort_notification_waits(&mvm->notif_wait);
|
iwl_abort_notification_waits(&mvm->notif_wait);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -663,9 +659,21 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
|
||||||
|
{
|
||||||
|
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
|
||||||
|
|
||||||
|
iwl_mvm_dump_nic_error_log(mvm);
|
||||||
|
|
||||||
|
iwl_mvm_nic_restart(mvm);
|
||||||
|
}
|
||||||
|
|
||||||
static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
|
static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
|
||||||
{
|
{
|
||||||
|
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
|
||||||
|
|
||||||
WARN_ON(1);
|
WARN_ON(1);
|
||||||
|
iwl_mvm_nic_restart(mvm);
|
||||||
}
|
}
|
||||||
|
|
||||||
static const struct iwl_op_mode_ops iwl_mvm_ops = {
|
static const struct iwl_op_mode_ops iwl_mvm_ops = {
|
||||||
|
|
|
@@ -131,33 +131,42 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
|
||||||
static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
|
static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
|
||||||
struct iwl_rx_phy_info *phy_info)
|
struct iwl_rx_phy_info *phy_info)
|
||||||
{
|
{
|
||||||
u32 rssi_a, rssi_b, rssi_c, max_rssi, agc_db;
|
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
|
||||||
|
int rssi_all_band_a, rssi_all_band_b;
|
||||||
|
u32 agc_a, agc_b, max_agc;
|
||||||
u32 val;
|
u32 val;
|
||||||
|
|
||||||
/* Find max rssi among 3 possible receivers.
|
/* Find max rssi among 2 possible receivers.
|
||||||
* These values are measured by the Digital Signal Processor (DSP).
|
* These values are measured by the Digital Signal Processor (DSP).
|
||||||
* They should stay fairly constant even as the signal strength varies,
|
* They should stay fairly constant even as the signal strength varies,
|
||||||
* if the radio's Automatic Gain Control (AGC) is working right.
|
* if the radio's Automatic Gain Control (AGC) is working right.
|
||||||
* AGC value (see below) will provide the "interesting" info.
|
* AGC value (see below) will provide the "interesting" info.
|
||||||
*/
|
*/
|
||||||
|
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
|
||||||
|
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
|
||||||
|
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
|
||||||
|
max_agc = max_t(u32, agc_a, agc_b);
|
||||||
|
|
||||||
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
|
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
|
||||||
rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
|
rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
|
||||||
rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
|
rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
|
||||||
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_C_IDX]);
|
rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
|
||||||
rssi_c = (val & IWL_OFDM_RSSI_INBAND_C_MSK) >> IWL_OFDM_RSSI_C_POS;
|
IWL_OFDM_RSSI_ALLBAND_A_POS;
|
||||||
|
rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
|
||||||
|
IWL_OFDM_RSSI_ALLBAND_B_POS;
|
||||||
|
|
||||||
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
|
/*
|
||||||
agc_db = (val & IWL_OFDM_AGC_DB_MSK) >> IWL_OFDM_AGC_DB_POS;
|
* dBm = rssi dB - agc dB - constant.
|
||||||
|
* Higher AGC (higher radio gain) means lower signal.
|
||||||
|
*/
|
||||||
|
rssi_a_dbm = rssi_a - IWL_RSSI_OFFSET - agc_a;
|
||||||
|
rssi_b_dbm = rssi_b - IWL_RSSI_OFFSET - agc_b;
|
||||||
|
max_rssi_dbm = max_t(int, rssi_a_dbm, rssi_b_dbm);
|
||||||
|
|
||||||
max_rssi = max_t(u32, rssi_a, rssi_b);
|
IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
|
||||||
max_rssi = max_t(u32, max_rssi, rssi_c);
|
rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
|
||||||
|
|
||||||
IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
|
return max_rssi_dbm;
|
||||||
rssi_a, rssi_b, rssi_c, max_rssi, agc_db);
|
|
||||||
|
|
||||||
/* dBm = max_rssi dB - agc dB - constant.
|
|
||||||
* Higher AGC (higher radio gain) means lower signal. */
|
|
||||||
return max_rssi - agc_db - IWL_RSSI_OFFSET;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
|
|
@@ -770,6 +770,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||||
u16 txq_id;
|
u16 txq_id;
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If mac80211 is cleaning its state, then say that we finished since
|
||||||
|
* our state has been cleared anyway.
|
||||||
|
*/
|
||||||
|
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
|
||||||
|
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
spin_lock_bh(&mvmsta->lock);
|
spin_lock_bh(&mvmsta->lock);
|
||||||
|
|
||||||
txq_id = tid_data->txq_id;
|
txq_id = tid_data->txq_id;
|
||||||
|
|
|
@@ -607,12 +607,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
|
||||||
|
|
||||||
/* Single frame failure in an AMPDU queue => send BAR */
|
/* Single frame failure in an AMPDU queue => send BAR */
|
||||||
if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
|
if (txq_id >= IWL_FIRST_AMPDU_QUEUE &&
|
||||||
!(info->flags & IEEE80211_TX_STAT_ACK)) {
|
!(info->flags & IEEE80211_TX_STAT_ACK))
|
||||||
/* there must be only one skb in the skb_list */
|
|
||||||
WARN_ON_ONCE(skb_freed > 1 ||
|
|
||||||
!skb_queue_empty(&skbs));
|
|
||||||
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
|
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
|
||||||
}
|
|
||||||
|
|
||||||
/* W/A FW bug: seq_ctl is wrong when the queue is flushed */
|
/* W/A FW bug: seq_ctl is wrong when the queue is flushed */
|
||||||
if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
|
if (status == TX_STATUS_FAIL_FIFO_FLUSHED) {
|
||||||
|
|
|
@@ -137,10 +137,6 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
|
||||||
struct iwl_cmd_meta {
|
struct iwl_cmd_meta {
|
||||||
/* only for SYNC commands, iff the reply skb is wanted */
|
/* only for SYNC commands, iff the reply skb is wanted */
|
||||||
struct iwl_host_cmd *source;
|
struct iwl_host_cmd *source;
|
||||||
|
|
||||||
DEFINE_DMA_UNMAP_ADDR(mapping);
|
|
||||||
DEFINE_DMA_UNMAP_LEN(len);
|
|
||||||
|
|
||||||
u32 flags;
|
u32 flags;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -185,25 +181,36 @@ struct iwl_queue {
|
||||||
/*
|
/*
|
||||||
* The FH will write back to the first TB only, so we need
|
* The FH will write back to the first TB only, so we need
|
||||||
* to copy some data into the buffer regardless of whether
|
* to copy some data into the buffer regardless of whether
|
||||||
* it should be mapped or not. This indicates how much to
|
* it should be mapped or not. This indicates how big the
|
||||||
* copy, even for HCMDs it must be big enough to fit the
|
* first TB must be to include the scratch buffer. Since
|
||||||
* DRAM scratch from the TX cmd, at least 16 bytes.
|
* the scratch is 4 bytes at offset 12, it's 16 now. If we
|
||||||
|
* make it bigger then allocations will be bigger and copy
|
||||||
|
* slower, so that's probably not useful.
|
||||||
*/
|
*/
|
||||||
#define IWL_HCMD_MIN_COPY_SIZE 16
|
#define IWL_HCMD_SCRATCHBUF_SIZE 16
|
||||||
|
|
||||||
struct iwl_pcie_txq_entry {
|
struct iwl_pcie_txq_entry {
|
||||||
struct iwl_device_cmd *cmd;
|
struct iwl_device_cmd *cmd;
|
||||||
struct iwl_device_cmd *copy_cmd;
|
|
||||||
struct sk_buff *skb;
|
struct sk_buff *skb;
|
||||||
/* buffer to free after command completes */
|
/* buffer to free after command completes */
|
||||||
const void *free_buf;
|
const void *free_buf;
|
||||||
struct iwl_cmd_meta meta;
|
struct iwl_cmd_meta meta;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct iwl_pcie_txq_scratch_buf {
|
||||||
|
struct iwl_cmd_header hdr;
|
||||||
|
u8 buf[8];
|
||||||
|
__le32 scratch;
|
||||||
|
};
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct iwl_txq - Tx Queue for DMA
|
* struct iwl_txq - Tx Queue for DMA
|
||||||
* @q: generic Rx/Tx queue descriptor
|
* @q: generic Rx/Tx queue descriptor
|
||||||
* @tfds: transmit frame descriptors (DMA memory)
|
* @tfds: transmit frame descriptors (DMA memory)
|
||||||
|
* @scratchbufs: start of command headers, including scratch buffers, for
|
||||||
|
* the writeback -- this is DMA memory and an array holding one buffer
|
||||||
|
* for each command on the queue
|
||||||
|
* @scratchbufs_dma: DMA address for the scratchbufs start
|
||||||
* @entries: transmit entries (driver state)
|
* @entries: transmit entries (driver state)
|
||||||
* @lock: queue lock
|
* @lock: queue lock
|
||||||
* @stuck_timer: timer that fires if queue gets stuck
|
* @stuck_timer: timer that fires if queue gets stuck
|
||||||
|
@@ -217,6 +224,8 @@ struct iwl_pcie_txq_entry {
|
||||||
struct iwl_txq {
|
struct iwl_txq {
|
||||||
struct iwl_queue q;
|
struct iwl_queue q;
|
||||||
struct iwl_tfd *tfds;
|
struct iwl_tfd *tfds;
|
||||||
|
struct iwl_pcie_txq_scratch_buf *scratchbufs;
|
||||||
|
dma_addr_t scratchbufs_dma;
|
||||||
struct iwl_pcie_txq_entry *entries;
|
struct iwl_pcie_txq_entry *entries;
|
||||||
spinlock_t lock;
|
spinlock_t lock;
|
||||||
struct timer_list stuck_timer;
|
struct timer_list stuck_timer;
|
||||||
|
@@ -225,6 +234,13 @@ struct iwl_txq {
|
||||||
u8 active;
|
u8 active;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static inline dma_addr_t
|
||||||
|
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
|
||||||
|
{
|
||||||
|
return txq->scratchbufs_dma +
|
||||||
|
sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* struct iwl_trans_pcie - PCIe transport specific data
|
* struct iwl_trans_pcie - PCIe transport specific data
|
||||||
* @rxq: all the RX queue data
|
* @rxq: all the RX queue data
|
||||||
|
|
|
@@ -637,22 +637,14 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
|
||||||
index = SEQ_TO_INDEX(sequence);
|
index = SEQ_TO_INDEX(sequence);
|
||||||
cmd_index = get_cmd_index(&txq->q, index);
|
cmd_index = get_cmd_index(&txq->q, index);
|
||||||
|
|
||||||
if (reclaim) {
|
if (reclaim)
|
||||||
struct iwl_pcie_txq_entry *ent;
|
cmd = txq->entries[cmd_index].cmd;
|
||||||
ent = &txq->entries[cmd_index];
|
else
|
||||||
cmd = ent->copy_cmd;
|
|
||||||
WARN_ON_ONCE(!cmd && ent->meta.flags & CMD_WANT_HCMD);
|
|
||||||
} else {
|
|
||||||
cmd = NULL;
|
cmd = NULL;
|
||||||
}
|
|
||||||
|
|
||||||
err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
|
err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
|
||||||
|
|
||||||
if (reclaim) {
|
if (reclaim) {
|
||||||
/* The original command isn't needed any more */
|
|
||||||
kfree(txq->entries[cmd_index].copy_cmd);
|
|
||||||
txq->entries[cmd_index].copy_cmd = NULL;
|
|
||||||
/* nor is the duplicated part of the command */
|
|
||||||
kfree(txq->entries[cmd_index].free_buf);
|
kfree(txq->entries[cmd_index].free_buf);
|
||||||
txq->entries[cmd_index].free_buf = NULL;
|
txq->entries[cmd_index].free_buf = NULL;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -191,12 +191,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = q->read_ptr; i != q->write_ptr;
|
for (i = q->read_ptr; i != q->write_ptr;
|
||||||
i = iwl_queue_inc_wrap(i, q->n_bd)) {
|
i = iwl_queue_inc_wrap(i, q->n_bd))
|
||||||
struct iwl_tx_cmd *tx_cmd =
|
|
||||||
(struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
|
|
||||||
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
|
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
|
||||||
get_unaligned_le32(&tx_cmd->scratch));
|
le32_to_cpu(txq->scratchbufs[i].scratch));
|
||||||
}
|
|
||||||
|
|
||||||
iwl_op_mode_nic_error(trans->op_mode);
|
iwl_op_mode_nic_error(trans->op_mode);
|
||||||
}
|
}
|
||||||
|
@@ -367,8 +364,8 @@ static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_tfd *tfd)
|
||||||
}
|
}
|
||||||
|
|
||||||
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
|
static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
|
||||||
struct iwl_cmd_meta *meta, struct iwl_tfd *tfd,
|
struct iwl_cmd_meta *meta,
|
||||||
enum dma_data_direction dma_dir)
|
struct iwl_tfd *tfd)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
int num_tbs;
|
int num_tbs;
|
||||||
|
@@ -382,17 +379,12 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Unmap tx_cmd */
|
/* first TB is never freed - it's the scratchbuf data */
|
||||||
if (num_tbs)
|
|
||||||
dma_unmap_single(trans->dev,
|
|
||||||
dma_unmap_addr(meta, mapping),
|
|
||||||
dma_unmap_len(meta, len),
|
|
||||||
DMA_BIDIRECTIONAL);
|
|
||||||
|
|
||||||
/* Unmap chunks, if any. */
|
|
||||||
for (i = 1; i < num_tbs; i++)
|
for (i = 1; i < num_tbs; i++)
|
||||||
dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
|
dma_unmap_single(trans->dev, iwl_pcie_tfd_tb_get_addr(tfd, i),
|
||||||
iwl_pcie_tfd_tb_get_len(tfd, i), dma_dir);
|
iwl_pcie_tfd_tb_get_len(tfd, i),
|
||||||
|
DMA_TO_DEVICE);
|
||||||
|
|
||||||
tfd->num_tbs = 0;
|
tfd->num_tbs = 0;
|
||||||
}
|
}
|
||||||
|
@@ -406,8 +398,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
|
||||||
* Does NOT advance any TFD circular buffer read/write indexes
|
* Does NOT advance any TFD circular buffer read/write indexes
|
||||||
* Does NOT free the TFD itself (which is within circular buffer)
|
* Does NOT free the TFD itself (which is within circular buffer)
|
||||||
*/
|
*/
|
||||||
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
|
static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
|
||||||
enum dma_data_direction dma_dir)
|
|
||||||
{
|
{
|
||||||
struct iwl_tfd *tfd_tmp = txq->tfds;
|
struct iwl_tfd *tfd_tmp = txq->tfds;
|
||||||
|
|
||||||
|
@@ -418,8 +409,7 @@ static void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
|
||||||
lockdep_assert_held(&txq->lock);
|
lockdep_assert_held(&txq->lock);
|
||||||
|
|
||||||
/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
|
/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
|
||||||
iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
|
iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr]);
|
||||||
dma_dir);
|
|
||||||
|
|
||||||
/* free SKB */
|
/* free SKB */
|
||||||
if (txq->entries) {
|
if (txq->entries) {
|
||||||
|
@@ -479,6 +469,7 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
|
||||||
{
|
{
|
||||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||||
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
|
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
|
||||||
|
size_t scratchbuf_sz;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (WARN_ON(txq->entries || txq->tfds))
|
if (WARN_ON(txq->entries || txq->tfds))
|
||||||
|
@@ -514,9 +505,25 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
|
||||||
IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
|
IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
|
||||||
|
BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
|
||||||
|
sizeof(struct iwl_cmd_header) +
|
||||||
|
offsetof(struct iwl_tx_cmd, scratch));
|
||||||
|
|
||||||
|
scratchbuf_sz = sizeof(*txq->scratchbufs) * slots_num;
|
||||||
|
|
||||||
|
txq->scratchbufs = dma_alloc_coherent(trans->dev, scratchbuf_sz,
|
||||||
|
&txq->scratchbufs_dma,
|
||||||
|
GFP_KERNEL);
|
||||||
|
if (!txq->scratchbufs)
|
||||||
|
goto err_free_tfds;
|
||||||
|
|
||||||
txq->q.id = txq_id;
|
txq->q.id = txq_id;
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
err_free_tfds:
|
||||||
|
dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->q.dma_addr);
|
||||||
error:
|
error:
|
||||||
if (txq->entries && txq_id == trans_pcie->cmd_queue)
|
if (txq->entries && txq_id == trans_pcie->cmd_queue)
|
||||||
for (i = 0; i < slots_num; i++)
|
for (i = 0; i < slots_num; i++)
|
||||||
|
@@ -565,22 +572,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
|
||||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||||
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
|
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
|
||||||
struct iwl_queue *q = &txq->q;
|
struct iwl_queue *q = &txq->q;
|
||||||
enum dma_data_direction dma_dir;
|
|
||||||
|
|
||||||
if (!q->n_bd)
|
if (!q->n_bd)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* In the command queue, all the TBs are mapped as BIDI
|
|
||||||
* so unmap them as such.
|
|
||||||
*/
|
|
||||||
if (txq_id == trans_pcie->cmd_queue)
|
|
||||||
dma_dir = DMA_BIDIRECTIONAL;
|
|
||||||
else
|
|
||||||
dma_dir = DMA_TO_DEVICE;
|
|
||||||
|
|
||||||
spin_lock_bh(&txq->lock);
|
spin_lock_bh(&txq->lock);
|
||||||
while (q->write_ptr != q->read_ptr) {
|
while (q->write_ptr != q->read_ptr) {
|
||||||
iwl_pcie_txq_free_tfd(trans, txq, dma_dir);
|
iwl_pcie_txq_free_tfd(trans, txq);
|
||||||
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
|
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
|
||||||
}
|
}
|
||||||
spin_unlock_bh(&txq->lock);
|
spin_unlock_bh(&txq->lock);
|
||||||
|
@@ -610,7 +608,6 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
|
||||||
if (txq_id == trans_pcie->cmd_queue)
|
if (txq_id == trans_pcie->cmd_queue)
|
||||||
for (i = 0; i < txq->q.n_window; i++) {
|
for (i = 0; i < txq->q.n_window; i++) {
|
||||||
kfree(txq->entries[i].cmd);
|
kfree(txq->entries[i].cmd);
|
||||||
kfree(txq->entries[i].copy_cmd);
|
|
||||||
kfree(txq->entries[i].free_buf);
|
kfree(txq->entries[i].free_buf);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -619,6 +616,10 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
|
||||||
dma_free_coherent(dev, sizeof(struct iwl_tfd) *
|
dma_free_coherent(dev, sizeof(struct iwl_tfd) *
|
||||||
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
|
txq->q.n_bd, txq->tfds, txq->q.dma_addr);
|
||||||
txq->q.dma_addr = 0;
|
txq->q.dma_addr = 0;
|
||||||
|
|
||||||
|
dma_free_coherent(dev,
|
||||||
|
sizeof(*txq->scratchbufs) * txq->q.n_window,
|
||||||
|
txq->scratchbufs, txq->scratchbufs_dma);
|
||||||
}
|
}
|
||||||
|
|
||||||
kfree(txq->entries);
|
kfree(txq->entries);
|
||||||
|
@@ -962,7 +963,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
|
||||||
|
|
||||||
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
|
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
|
||||||
|
|
||||||
iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
|
iwl_pcie_txq_free_tfd(trans, txq);
|
||||||
}
|
}
|
||||||
|
|
||||||
iwl_pcie_txq_progress(trans_pcie, txq);
|
iwl_pcie_txq_progress(trans_pcie, txq);
|
||||||
|
@@ -1152,29 +1153,29 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
|
||||||
void *dup_buf = NULL;
|
void *dup_buf = NULL;
|
||||||
dma_addr_t phys_addr;
|
dma_addr_t phys_addr;
|
||||||
int idx;
|
int idx;
|
||||||
u16 copy_size, cmd_size, dma_size;
|
u16 copy_size, cmd_size, scratch_size;
|
||||||
bool had_nocopy = false;
|
bool had_nocopy = false;
|
||||||
int i;
|
int i;
|
||||||
u32 cmd_pos;
|
u32 cmd_pos;
|
||||||
const u8 *cmddata[IWL_MAX_CMD_TFDS];
|
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
|
||||||
u16 cmdlen[IWL_MAX_CMD_TFDS];
|
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
|
||||||
|
|
||||||
copy_size = sizeof(out_cmd->hdr);
|
copy_size = sizeof(out_cmd->hdr);
|
||||||
cmd_size = sizeof(out_cmd->hdr);
|
cmd_size = sizeof(out_cmd->hdr);
|
||||||
|
|
||||||
/* need one for the header if the first is NOCOPY */
|
/* need one for the header if the first is NOCOPY */
|
||||||
BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);
|
BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
|
||||||
|
|
||||||
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
|
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
|
||||||
cmddata[i] = cmd->data[i];
|
cmddata[i] = cmd->data[i];
|
||||||
cmdlen[i] = cmd->len[i];
|
cmdlen[i] = cmd->len[i];
|
||||||
|
|
||||||
if (!cmd->len[i])
|
if (!cmd->len[i])
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
|
/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
|
||||||
if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
|
if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
|
||||||
int copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
|
int copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
|
||||||
|
|
||||||
if (copy > cmdlen[i])
|
if (copy > cmdlen[i])
|
||||||
copy = cmdlen[i];
|
copy = cmdlen[i];
|
||||||
|
@@ -1260,15 +1261,15 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
|
||||||
/* and copy the data that needs to be copied */
|
/* and copy the data that needs to be copied */
|
||||||
cmd_pos = offsetof(struct iwl_device_cmd, payload);
|
cmd_pos = offsetof(struct iwl_device_cmd, payload);
|
||||||
copy_size = sizeof(out_cmd->hdr);
|
copy_size = sizeof(out_cmd->hdr);
|
||||||
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
|
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
|
||||||
int copy = 0;
|
int copy = 0;
|
||||||
|
|
||||||
if (!cmd->len)
|
if (!cmd->len)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
/* need at least IWL_HCMD_MIN_COPY_SIZE copied */
|
/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
|
||||||
if (copy_size < IWL_HCMD_MIN_COPY_SIZE) {
|
if (copy_size < IWL_HCMD_SCRATCHBUF_SIZE) {
|
||||||
copy = IWL_HCMD_MIN_COPY_SIZE - copy_size;
|
copy = IWL_HCMD_SCRATCHBUF_SIZE - copy_size;
|
||||||
|
|
||||||
if (copy > cmd->len[i])
|
if (copy > cmd->len[i])
|
||||||
copy = cmd->len[i];
|
copy = cmd->len[i];
|
||||||
|
@@ -1286,50 +1287,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
WARN_ON_ONCE(txq->entries[idx].copy_cmd);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* since out_cmd will be the source address of the FH, it will write
|
|
||||||
* the retry count there. So when the user needs to receivce the HCMD
|
|
||||||
* that corresponds to the response in the response handler, it needs
|
|
||||||
* to set CMD_WANT_HCMD.
|
|
||||||
*/
|
|
||||||
if (cmd->flags & CMD_WANT_HCMD) {
|
|
||||||
txq->entries[idx].copy_cmd =
|
|
||||||
kmemdup(out_cmd, cmd_pos, GFP_ATOMIC);
|
|
||||||
if (unlikely(!txq->entries[idx].copy_cmd)) {
|
|
||||||
idx = -ENOMEM;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
IWL_DEBUG_HC(trans,
|
IWL_DEBUG_HC(trans,
|
||||||
"Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
|
"Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
|
||||||
get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
|
get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
|
||||||
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
|
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
|
||||||
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
|
cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
|
||||||
|
|
||||||
/*
|
/* start the TFD with the scratchbuf */
|
||||||
* If the entire command is smaller than IWL_HCMD_MIN_COPY_SIZE, we must
|
scratch_size = min_t(int, copy_size, IWL_HCMD_SCRATCHBUF_SIZE);
|
||||||
* still map at least that many bytes for the hardware to write back to.
|
memcpy(&txq->scratchbufs[q->write_ptr], &out_cmd->hdr, scratch_size);
|
||||||
* We have enough space, so that's not a problem.
|
iwl_pcie_txq_build_tfd(trans, txq,
|
||||||
*/
|
iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr),
|
||||||
dma_size = max_t(u16, copy_size, IWL_HCMD_MIN_COPY_SIZE);
|
scratch_size, 1);
|
||||||
|
|
||||||
phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, dma_size,
|
/* map first command fragment, if any remains */
|
||||||
DMA_BIDIRECTIONAL);
|
if (copy_size > scratch_size) {
|
||||||
if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
|
phys_addr = dma_map_single(trans->dev,
|
||||||
|
((u8 *)&out_cmd->hdr) + scratch_size,
|
||||||
|
copy_size - scratch_size,
|
||||||
|
DMA_TO_DEVICE);
|
||||||
|
if (dma_mapping_error(trans->dev, phys_addr)) {
|
||||||
|
iwl_pcie_tfd_unmap(trans, out_meta,
|
||||||
|
&txq->tfds[q->write_ptr]);
|
||||||
idx = -ENOMEM;
|
idx = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
dma_unmap_addr_set(out_meta, mapping, phys_addr);
|
iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
|
||||||
dma_unmap_len_set(out_meta, len, dma_size);
|
copy_size - scratch_size, 0);
|
||||||
|
}
|
||||||
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, copy_size, 1);
|
|
||||||
|
|
||||||
/* map the remaining (adjusted) nocopy/dup fragments */
|
/* map the remaining (adjusted) nocopy/dup fragments */
|
||||||
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
|
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
|
||||||
const void *data = cmddata[i];
|
const void *data = cmddata[i];
|
||||||
|
|
||||||
if (!cmdlen[i])
|
if (!cmdlen[i])
|
||||||
|
@@ -1340,11 +1329,10 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
|
||||||
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
|
if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
|
||||||
data = dup_buf;
|
data = dup_buf;
|
||||||
phys_addr = dma_map_single(trans->dev, (void *)data,
|
phys_addr = dma_map_single(trans->dev, (void *)data,
|
||||||
cmdlen[i], DMA_BIDIRECTIONAL);
|
cmdlen[i], DMA_TO_DEVICE);
|
||||||
if (dma_mapping_error(trans->dev, phys_addr)) {
|
if (dma_mapping_error(trans->dev, phys_addr)) {
|
||||||
iwl_pcie_tfd_unmap(trans, out_meta,
|
iwl_pcie_tfd_unmap(trans, out_meta,
|
||||||
&txq->tfds[q->write_ptr],
|
&txq->tfds[q->write_ptr]);
|
||||||
DMA_BIDIRECTIONAL);
|
|
||||||
idx = -ENOMEM;
|
idx = -ENOMEM;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
@@ -1418,7 +1406,7 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
|
||||||
cmd = txq->entries[cmd_index].cmd;
|
cmd = txq->entries[cmd_index].cmd;
|
||||||
meta = &txq->entries[cmd_index].meta;
|
meta = &txq->entries[cmd_index].meta;
|
||||||
|
|
||||||
iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
|
iwl_pcie_tfd_unmap(trans, meta, &txq->tfds[index]);
|
||||||
|
|
||||||
/* Input error checking is done when commands are added to queue. */
|
/* Input error checking is done when commands are added to queue. */
|
||||||
if (meta->flags & CMD_WANT_SKB) {
|
if (meta->flags & CMD_WANT_SKB) {
|
||||||
|
@@ -1597,10 +1585,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||||
struct iwl_cmd_meta *out_meta;
|
struct iwl_cmd_meta *out_meta;
|
||||||
struct iwl_txq *txq;
|
struct iwl_txq *txq;
|
||||||
struct iwl_queue *q;
|
struct iwl_queue *q;
|
||||||
dma_addr_t phys_addr = 0;
|
dma_addr_t tb0_phys, tb1_phys, scratch_phys;
|
||||||
dma_addr_t txcmd_phys;
|
void *tb1_addr;
|
||||||
dma_addr_t scratch_phys;
|
u16 len, tb1_len, tb2_len;
|
||||||
u16 len, firstlen, secondlen;
|
|
||||||
u8 wait_write_ptr = 0;
|
u8 wait_write_ptr = 0;
|
||||||
__le16 fc = hdr->frame_control;
|
__le16 fc = hdr->frame_control;
|
||||||
u8 hdr_len = ieee80211_hdrlen(fc);
|
u8 hdr_len = ieee80211_hdrlen(fc);
|
||||||
|
@@ -1638,35 +1625,73 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||||
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
|
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
|
||||||
INDEX_TO_SEQ(q->write_ptr)));
|
INDEX_TO_SEQ(q->write_ptr)));
|
||||||
|
|
||||||
|
tb0_phys = iwl_pcie_get_scratchbuf_dma(txq, q->write_ptr);
|
||||||
|
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
|
||||||
|
offsetof(struct iwl_tx_cmd, scratch);
|
||||||
|
|
||||||
|
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
|
||||||
|
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
|
||||||
|
|
||||||
/* Set up first empty entry in queue's array of Tx/cmd buffers */
|
/* Set up first empty entry in queue's array of Tx/cmd buffers */
|
||||||
out_meta = &txq->entries[q->write_ptr].meta;
|
out_meta = &txq->entries[q->write_ptr].meta;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Use the first empty entry in this queue's command buffer array
|
* The second TB (tb1) points to the remainder of the TX command
|
||||||
* to contain the Tx command and MAC header concatenated together
|
* and the 802.11 header - dword aligned size
|
||||||
* (payload data will be in another buffer).
|
* (This calculation modifies the TX command, so do it before the
|
||||||
* Size of this varies, due to varying MAC header length.
|
* setup of the first TB)
|
||||||
* If end is not dword aligned, we'll have 2 extra bytes at the end
|
|
||||||
* of the MAC header (device reads on dword boundaries).
|
|
||||||
* We'll tell device about this padding later.
|
|
||||||
*/
|
*/
|
||||||
len = sizeof(struct iwl_tx_cmd) +
|
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
|
||||||
sizeof(struct iwl_cmd_header) + hdr_len;
|
hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
|
||||||
firstlen = (len + 3) & ~3;
|
tb1_len = (len + 3) & ~3;
|
||||||
|
|
||||||
/* Tell NIC about any 2-byte padding after MAC header */
|
/* Tell NIC about any 2-byte padding after MAC header */
|
||||||
if (firstlen != len)
|
if (tb1_len != len)
|
||||||
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
|
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
|
||||||
|
|
||||||
/* Physical address of this Tx command's header (not MAC header!),
|
/* The first TB points to the scratchbuf data - min_copy bytes */
|
||||||
* within command buffer array. */
|
memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
|
||||||
txcmd_phys = dma_map_single(trans->dev,
|
IWL_HCMD_SCRATCHBUF_SIZE);
|
||||||
&dev_cmd->hdr, firstlen,
|
iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
|
||||||
DMA_BIDIRECTIONAL);
|
IWL_HCMD_SCRATCHBUF_SIZE, 1);
|
||||||
if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
|
|
||||||
|
/* there must be data left over for TB1 or this code must be changed */
|
||||||
|
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_HCMD_SCRATCHBUF_SIZE);
|
||||||
|
|
||||||
|
/* map the data for TB1 */
|
||||||
|
tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_HCMD_SCRATCHBUF_SIZE;
|
||||||
|
tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
|
||||||
|
if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
|
||||||
goto out_err;
|
goto out_err;
|
||||||
dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
|
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, 0);
|
||||||
dma_unmap_len_set(out_meta, len, firstlen);
|
|
||||||
|
/*
|
||||||
|
* Set up TFD's third entry to point directly to remainder
|
||||||
|
* of skb, if any (802.11 null frames have no payload).
|
||||||
|
*/
|
||||||
|
tb2_len = skb->len - hdr_len;
|
||||||
|
if (tb2_len > 0) {
|
||||||
|
dma_addr_t tb2_phys = dma_map_single(trans->dev,
|
||||||
|
skb->data + hdr_len,
|
||||||
|
tb2_len, DMA_TO_DEVICE);
|
||||||
|
if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
|
||||||
|
iwl_pcie_tfd_unmap(trans, out_meta,
|
||||||
|
&txq->tfds[q->write_ptr]);
|
||||||
|
goto out_err;
|
||||||
|
}
|
||||||
|
iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Set up entry for this TFD in Tx byte-count array */
|
||||||
|
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
|
||||||
|
|
||||||
|
trace_iwlwifi_dev_tx(trans->dev, skb,
|
||||||
|
&txq->tfds[txq->q.write_ptr],
|
||||||
|
sizeof(struct iwl_tfd),
|
||||||
|
&dev_cmd->hdr, IWL_HCMD_SCRATCHBUF_SIZE + tb1_len,
|
||||||
|
skb->data + hdr_len, tb2_len);
|
||||||
|
trace_iwlwifi_dev_tx_data(trans->dev, skb,
|
||||||
|
skb->data + hdr_len, tb2_len);
|
||||||
|
|
||||||
if (!ieee80211_has_morefrags(fc)) {
|
if (!ieee80211_has_morefrags(fc)) {
|
||||||
txq->need_update = 1;
|
txq->need_update = 1;
|
||||||
|
@@ -1675,49 +1700,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
|
||||||
txq->need_update = 0;
|
txq->need_update = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Set up TFD's 2nd entry to point directly to remainder of skb,
|
|
||||||
* if any (802.11 null frames have no payload). */
|
|
||||||
secondlen = skb->len - hdr_len;
|
|
||||||
if (secondlen > 0) {
|
|
||||||
phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
|
|
||||||
secondlen, DMA_TO_DEVICE);
|
|
||||||
if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
|
|
||||||
dma_unmap_single(trans->dev,
|
|
||||||
dma_unmap_addr(out_meta, mapping),
|
|
||||||
dma_unmap_len(out_meta, len),
|
|
||||||
DMA_BIDIRECTIONAL);
|
|
||||||
goto out_err;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Attach buffers to TFD */
|
|
||||||
iwl_pcie_txq_build_tfd(trans, txq, txcmd_phys, firstlen, 1);
|
|
||||||
if (secondlen > 0)
|
|
||||||
iwl_pcie_txq_build_tfd(trans, txq, phys_addr, secondlen, 0);
|
|
||||||
|
|
||||||
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
|
|
||||||
offsetof(struct iwl_tx_cmd, scratch);
|
|
||||||
|
|
||||||
/* take back ownership of DMA buffer to enable update */
|
|
||||||
dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
|
|
||||||
DMA_BIDIRECTIONAL);
|
|
||||||
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
|
|
||||||
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
|
|
||||||
|
|
||||||
/* Set up entry for this TFD in Tx byte-count array */
|
|
||||||
iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
|
|
||||||
|
|
||||||
dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
|
|
||||||
DMA_BIDIRECTIONAL);
|
|
||||||
|
|
||||||
trace_iwlwifi_dev_tx(trans->dev, skb,
|
|
||||||
&txq->tfds[txq->q.write_ptr],
|
|
||||||
sizeof(struct iwl_tfd),
|
|
||||||
&dev_cmd->hdr, firstlen,
|
|
||||||
skb->data + hdr_len, secondlen);
|
|
||||||
trace_iwlwifi_dev_tx_data(trans->dev, skb,
|
|
||||||
skb->data + hdr_len, secondlen);
|
|
||||||
|
|
||||||
/* start timer if queue currently empty */
|
/* start timer if queue currently empty */
|
||||||
if (txq->need_update && q->read_ptr == q->write_ptr &&
|
if (txq->need_update && q->read_ptr == q->write_ptr &&
|
||||||
trans_pcie->wd_timeout)
|
trans_pcie->wd_timeout)
|
||||||
|
|
|
@@ -3290,14 +3290,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
|
||||||
int ret = -ENODATA;
|
int ret = -ENODATA;
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
if (local->use_chanctx) {
|
|
||||||
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
|
||||||
if (chanctx_conf) {
|
if (chanctx_conf) {
|
||||||
*chandef = chanctx_conf->def;
|
*chandef = chanctx_conf->def;
|
||||||
ret = 0;
|
ret = 0;
|
||||||
}
|
} else if (local->open_count > 0 &&
|
||||||
} else if (local->open_count == local->monitors) {
|
local->open_count == local->monitors &&
|
||||||
|
sdata->vif.type == NL80211_IFTYPE_MONITOR) {
|
||||||
|
if (local->use_chanctx)
|
||||||
*chandef = local->monitor_chandef;
|
*chandef = local->monitor_chandef;
|
||||||
|
else
|
||||||
|
cfg80211_chandef_create(chandef,
|
||||||
|
local->_oper_channel,
|
||||||
|
local->_oper_channel_type);
|
||||||
ret = 0;
|
ret = 0;
|
||||||
}
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
|
|
|
@@ -541,6 +541,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
|
||||||
|
|
||||||
ieee80211_adjust_monitor_flags(sdata, 1);
|
ieee80211_adjust_monitor_flags(sdata, 1);
|
||||||
ieee80211_configure_filter(local);
|
ieee80211_configure_filter(local);
|
||||||
|
mutex_lock(&local->mtx);
|
||||||
|
ieee80211_recalc_idle(local);
|
||||||
|
mutex_unlock(&local->mtx);
|
||||||
|
|
||||||
netif_carrier_on(dev);
|
netif_carrier_on(dev);
|
||||||
break;
|
break;
|
||||||
|
@@ -812,6 +815,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
|
||||||
|
|
||||||
ieee80211_adjust_monitor_flags(sdata, -1);
|
ieee80211_adjust_monitor_flags(sdata, -1);
|
||||||
ieee80211_configure_filter(local);
|
ieee80211_configure_filter(local);
|
||||||
|
mutex_lock(&local->mtx);
|
||||||
|
ieee80211_recalc_idle(local);
|
||||||
|
mutex_unlock(&local->mtx);
|
||||||
break;
|
break;
|
||||||
case NL80211_IFTYPE_P2P_DEVICE:
|
case NL80211_IFTYPE_P2P_DEVICE:
|
||||||
/* relies on synchronize_rcu() below */
|
/* relies on synchronize_rcu() below */
|
||||||
|
|
|
@@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
|
||||||
our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
|
our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &
|
||||||
mask) >> shift;
|
mask) >> shift;
|
||||||
|
|
||||||
|
if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
|
||||||
|
continue;
|
||||||
|
|
||||||
switch (ap_mcs) {
|
switch (ap_mcs) {
|
||||||
default:
|
default:
|
||||||
if (our_mcs <= ap_mcs)
|
if (our_mcs <= ap_mcs)
|
||||||
|
@@ -3502,6 +3505,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
|
||||||
{
|
{
|
||||||
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
|
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Stop timers before deleting work items, as timers
|
||||||
|
* could race and re-add the work-items. They will be
|
||||||
|
* re-established on connection.
|
||||||
|
*/
|
||||||
|
del_timer_sync(&ifmgd->conn_mon_timer);
|
||||||
|
del_timer_sync(&ifmgd->bcn_mon_timer);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* we need to use atomic bitops for the running bits
|
* we need to use atomic bitops for the running bits
|
||||||
* only because both timers might fire at the same
|
* only because both timers might fire at the same
|
||||||
|
@@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
|
||||||
if (del_timer_sync(&ifmgd->timer))
|
if (del_timer_sync(&ifmgd->timer))
|
||||||
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
|
set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
|
||||||
|
|
||||||
cancel_work_sync(&ifmgd->chswitch_work);
|
|
||||||
if (del_timer_sync(&ifmgd->chswitch_timer))
|
if (del_timer_sync(&ifmgd->chswitch_timer))
|
||||||
set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
|
set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
|
||||||
|
cancel_work_sync(&ifmgd->chswitch_work);
|
||||||
/* these will just be re-established on connection */
|
|
||||||
del_timer_sync(&ifmgd->conn_mon_timer);
|
|
||||||
del_timer_sync(&ifmgd->bcn_mon_timer);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
|
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
|
||||||
|
@@ -4315,6 +4322,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)
|
||||||
{
|
{
|
||||||
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
|
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Make sure some work items will not run after this,
|
||||||
|
* they will not do anything but might not have been
|
||||||
|
* cancelled when disconnecting.
|
||||||
|
*/
|
||||||
|
cancel_work_sync(&ifmgd->monitor_work);
|
||||||
|
cancel_work_sync(&ifmgd->beacon_connection_loss_work);
|
||||||
|
cancel_work_sync(&ifmgd->request_smps_work);
|
||||||
|
cancel_work_sync(&ifmgd->csa_connection_drop_work);
|
||||||
|
cancel_work_sync(&ifmgd->chswitch_work);
|
||||||
|
|
||||||
mutex_lock(&ifmgd->mtx);
|
mutex_lock(&ifmgd->mtx);
|
||||||
if (ifmgd->assoc_data)
|
if (ifmgd->assoc_data)
|
||||||
ieee80211_destroy_assoc_data(sdata, false);
|
ieee80211_destroy_assoc_data(sdata, false);
|
||||||
|
|
|
@@ -2745,6 +2745,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
|
||||||
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
|
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
|
||||||
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
|
sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);
|
||||||
if (!ieee80211_tx_prepare(sdata, &tx, skb))
|
if (!ieee80211_tx_prepare(sdata, &tx, skb))
|
||||||
break;
|
break;
|
||||||
|
|
|
@@ -367,8 +367,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
|
||||||
rdev->wiphy.rts_threshold = (u32) -1;
|
rdev->wiphy.rts_threshold = (u32) -1;
|
||||||
rdev->wiphy.coverage_class = 0;
|
rdev->wiphy.coverage_class = 0;
|
||||||
|
|
||||||
rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH |
|
rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;
|
||||||
NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
|
|
||||||
|
|
||||||
return &rdev->wiphy;
|
return &rdev->wiphy;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -557,18 +557,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
|
||||||
if ((chan->flags & IEEE80211_CHAN_RADAR) &&
|
if ((chan->flags & IEEE80211_CHAN_RADAR) &&
|
||||||
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
|
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
|
||||||
goto nla_put_failure;
|
goto nla_put_failure;
|
||||||
if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
|
|
||||||
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
|
|
||||||
goto nla_put_failure;
|
|
||||||
if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
|
|
||||||
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
|
|
||||||
goto nla_put_failure;
|
|
||||||
if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
|
|
||||||
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
|
|
||||||
goto nla_put_failure;
|
|
||||||
if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
|
|
||||||
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
|
|
||||||
goto nla_put_failure;
|
|
||||||
|
|
||||||
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
|
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
|
||||||
DBM_TO_MBM(chan->max_power)))
|
DBM_TO_MBM(chan->max_power)))
|
||||||
|
@@ -1310,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
|
||||||
dev->wiphy.max_acl_mac_addrs))
|
dev->wiphy.max_acl_mac_addrs))
|
||||||
goto nla_put_failure;
|
goto nla_put_failure;
|
||||||
|
|
||||||
if (dev->wiphy.extended_capabilities &&
|
|
||||||
(nla_put(msg, NL80211_ATTR_EXT_CAPA,
|
|
||||||
dev->wiphy.extended_capabilities_len,
|
|
||||||
dev->wiphy.extended_capabilities) ||
|
|
||||||
nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
|
|
||||||
dev->wiphy.extended_capabilities_len,
|
|
||||||
dev->wiphy.extended_capabilities_mask)))
|
|
||||||
goto nla_put_failure;
|
|
||||||
|
|
||||||
return genlmsg_end(msg, hdr);
|
return genlmsg_end(msg, hdr);
|
||||||
|
|
||||||
nla_put_failure:
|
nla_put_failure:
|
||||||
|
@@ -1328,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag
|
||||||
|
|
||||||
static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
|
static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
|
||||||
{
|
{
|
||||||
int idx = 0;
|
int idx = 0, ret;
|
||||||
int start = cb->args[0];
|
int start = cb->args[0];
|
||||||
struct cfg80211_registered_device *dev;
|
struct cfg80211_registered_device *dev;
|
||||||
|
|
||||||
|
@@ -1338,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
|
||||||
continue;
|
continue;
|
||||||
if (++idx <= start)
|
if (++idx <= start)
|
||||||
continue;
|
continue;
|
||||||
if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
|
ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid,
|
||||||
cb->nlh->nlmsg_seq, NLM_F_MULTI,
|
cb->nlh->nlmsg_seq, NLM_F_MULTI,
|
||||||
dev) < 0) {
|
dev);
|
||||||
|
if (ret < 0) {
|
||||||
|
/*
|
||||||
|
* If sending the wiphy data didn't fit (ENOBUFS or
|
||||||
|
* EMSGSIZE returned), this SKB is still empty (so
|
||||||
|
* it's not too big because another wiphy dataset is
|
||||||
|
* already in the skb) and we've not tried to adjust
|
||||||
|
* the dump allocation yet ... then adjust the alloc
|
||||||
|
* size to be bigger, and return 1 but with the empty
|
||||||
|
* skb. This results in an empty message being RX'ed
|
||||||
|
* in userspace, but that is ignored.
|
||||||
|
*
|
||||||
|
* We can then retry with the larger buffer.
|
||||||
|
*/
|
||||||
|
if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
|
||||||
|
!skb->len &&
|
||||||
|
cb->min_dump_alloc < 4096) {
|
||||||
|
cb->min_dump_alloc = 4096;
|
||||||
|
mutex_unlock(&cfg80211_mutex);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
idx--;
|
idx--;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
@@ -1357,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
|
||||||
struct sk_buff *msg;
|
struct sk_buff *msg;
|
||||||
struct cfg80211_registered_device *dev = info->user_ptr[0];
|
struct cfg80211_registered_device *dev = info->user_ptr[0];
|
||||||
|
|
||||||
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
|
msg = nlmsg_new(4096, GFP_KERNEL);
|
||||||
if (!msg)
|
if (!msg)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|