First batch of iwlwifi patches intended for v5.1

* Support for Target Wakeup Time (TWT) -- a feature that allows the AP to specify when individual stations can access the medium;
* Support for mac80211 AMSDU handling;
* Debugging infrastructure work;
* Preparations for improvements in the device selection code;
* Some new PCI IDs;
* Some updates in the documentation;
* A bunch of fixes for issues found with static analyzers;
* A couple of janitorial fixes from the community;
* Some fixes in P2P;
* Other cleanups and small fixes;

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEF3LNfgb2BPWm68smoUecoho8xfoFAlxLXbsACgkQoUecoho8
xfr0RhAAuqzQWqk5KWpIDUX8ZNIepiTAtQ4qOAX8s5yQCeHMANo9Boxzarpt5dai
fVbjZPu4O0g5p2kKcTtQBfyE2a7kxxdfLgehofb/P//7hcGEzOP5VeYqfLyvorzV
sufivBcqDg6Mx1vw6cIufwdnQG0GblyKu3KlcFA+Z31HeJjds2tNFAo8lGZhL/Eb
vQmCPqT6nLwuTxXax45j1rJz6qWGxMx1TwoV2KqFQGD/TzerwypUNcAbSr4KZJJb
aPbWLmJ3+NB5MeBLvJ+PHPKpEjEd0lRGy60cWx+wh7sxXcjC7UpmEiik908IxUFa
d/dvlZ+0attZyU10HV7lEOrgJ7eSzJQ9PuSkrig3NHepNB9McwiN18HTqcN/CNBQ
hvrIiSvpYvNr6+bEyIYg573QyLOEj7abShuN4/C+jETi9AI6imcvw/qrrlnHSMXM
IfyLbLWEXC31KU3IDJpbajIA2n84eNOv/yn0jT2OuEKKfD1u2qeXNCdIi2PGm0LH
3xbvbLQmCzp65gh9ZKiALNpK5SruiO7ZRb5ChnxDOMo7M6FS46O59jrIt/Y3ZgF0
eGDWhuSuOY+q/FQ+ZTaxXM1QvBtqdZTvULBQakt+J79RYCztIjlhiqGILSTnLi01
DYgRWfUe2wllZ8yZVmqU2W3ZLSGw9Vqs+P9qQDr+OzNplbi3q44=
=3ESa
-----END PGP SIGNATURE-----

Merge tag 'iwlwifi-next-for-kalle-2019-01-25' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
commit 64e2330580
@@ -83,6 +83,7 @@
 #define IWL_22000_HR_A0_FW_PRE		"iwlwifi-QuQnj-a0-hr-a0-"
 #define IWL_22000_SU_Z0_FW_PRE		"iwlwifi-su-z0-"
 #define IWL_QU_B_JF_B_FW_PRE		"iwlwifi-Qu-b0-jf-b0-"
 #define IWL_CC_A_FW_PRE			"iwlwifi-cc-a0-"

 #define IWL_22000_HR_MODULE_FIRMWARE(api) \
	IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
@@ -104,6 +105,8 @@
	IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
 #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
	IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
 #define IWL_CC_A_MODULE_FIRMWARE(api) \
	IWL_CC_A_FW_PRE __stringify(api) ".ucode"

 static const struct iwl_base_params iwl_22000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -195,8 +198,8 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
	IWL_DEVICE_22500,
 };

 const struct iwl_cfg iwl22000_2ax_cfg_hr = {
	.name = "Intel(R) Dual Band Wireless AX 22000",
 const struct iwl_cfg iwl22560_2ax_cfg_hr = {
	.name = "Intel(R) Wireless-AX 22560",
	.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
	IWL_DEVICE_22500,
	/*
@@ -207,6 +210,42 @@ const struct iwl_cfg iwl22000_2ax_cfg_hr = {
	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };

 const struct iwl_cfg iwl22260_2ax_cfg = {
	.name = "Intel(R) Wireless-AX 22260",
	.fw_name_pre = IWL_CC_A_FW_PRE,
	IWL_DEVICE_22500,
	/*
	 * This device doesn't support receiving BlockAck with a large bitmap
	 * so we need to restrict the size of transmitted aggregation to the
	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
	 */
	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };

 const struct iwl_cfg killer1650x_2ax_cfg = {
	.name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (22260NGW)",
	.fw_name_pre = IWL_CC_A_FW_PRE,
	IWL_DEVICE_22500,
	/*
	 * This device doesn't support receiving BlockAck with a large bitmap
	 * so we need to restrict the size of transmitted aggregation to the
	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
	 */
	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };

 const struct iwl_cfg killer1650w_2ax_cfg = {
	.name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (22260D2W)",
	.fw_name_pre = IWL_CC_A_FW_PRE,
	IWL_DEVICE_22500,
	/*
	 * This device doesn't support receiving BlockAck with a large bitmap
	 * so we need to restrict the size of transmitted aggregation to the
	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
	 */
	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };

 /*
  * All JF radio modules are part of the 9000 series, but the MAC part
  * looks more like 22000. That's why this device is here, but called
@@ -242,6 +281,30 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
	IWL_DEVICE_22500,
 };

 const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
	.name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
	.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
	IWL_DEVICE_22500,
	/*
	 * This device doesn't support receiving BlockAck with a large bitmap
	 * so we need to restrict the size of transmitted aggregation to the
	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
	 */
	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };

 const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
	.name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
	.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
	IWL_DEVICE_22500,
	/*
	 * This device doesn't support receiving BlockAck with a large bitmap
	 * so we need to restrict the size of transmitted aggregation to the
	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
	 */
	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
 };

 const struct iwl_cfg iwl22000_2ax_cfg_jf = {
	.name = "Intel(R) Dual Band Wireless AX 22000",
	.fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
@@ -324,3 +387,4 @@ MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));

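The new IWL_CC_A_* macros follow the existing naming pattern: the firmware prefix is concatenated with a stringified uCode API number. A minimal, self-contained sketch of how that expansion works is shown below; the API number 46 is only an illustrative value and is not taken from the patch.

```c
/*
 * Illustration only (not part of the patch): how a prefix macro such as
 * IWL_CC_A_FW_PRE combines with __stringify() into the firmware file name
 * that the driver requests from userspace.
 */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define IWL_CC_A_FW_PRE			"iwlwifi-cc-a0-"
#define IWL_CC_A_MODULE_FIRMWARE(api) \
	IWL_CC_A_FW_PRE __stringify(api) ".ucode"

/* Expands at compile time to "iwlwifi-cc-a0-46.ucode". */
static const char *example_fw_name = IWL_CC_A_MODULE_FIRMWARE(46);
```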
@@ -73,21 +73,12 @@
 #define IWL9000_SMEM_OFFSET		0x400000
 #define IWL9000_SMEM_LEN		0x68000

 #define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
 #define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
 #define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
 #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
 #define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
 #define IWL9000A_MODULE_FIRMWARE(api) \
	IWL9000A_FW_PRE __stringify(api) ".ucode"
 #define IWL9000B_MODULE_FIRMWARE(api) \
	IWL9000B_FW_PRE __stringify(api) ".ucode"
 #define IWL9000RFB_MODULE_FIRMWARE(api) \
	IWL9000RFB_FW_PRE __stringify(api) ".ucode"
 #define IWL9260A_MODULE_FIRMWARE(api) \
	IWL9260A_FW_PRE __stringify(api) ".ucode"
 #define IWL9260B_MODULE_FIRMWARE(api) \
	IWL9260B_FW_PRE __stringify(api) ".ucode"
 #define IWL9000_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
 #define IWL9260_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
 #define IWL9000_MODULE_FIRMWARE(api) \
	IWL9000_FW_PRE __stringify(api) ".ucode"
 #define IWL9260_MODULE_FIRMWARE(api) \
	IWL9260_FW_PRE __stringify(api) ".ucode"

 static const struct iwl_base_params iwl9000_base_params = {
	.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -162,81 +153,67 @@ static const struct iwl_tt_params iwl9000_tt_params = {

 const struct iwl_cfg iwl9160_2ac_cfg = {
	.name = "Intel(R) Dual Band Wireless AC 9160",
	.fw_name_pre = IWL9260A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
	.fw_name_pre = IWL9260_FW_PRE,
	IWL_DEVICE_9000,
 };

 const struct iwl_cfg iwl9260_2ac_cfg = {
	.name = "Intel(R) Dual Band Wireless AC 9260",
	.fw_name_pre = IWL9260A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
	.fw_name_pre = IWL9260_FW_PRE,
	IWL_DEVICE_9000,
 };

 const struct iwl_cfg iwl9260_killer_2ac_cfg = {
	.name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
	.fw_name_pre = IWL9260A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
	.fw_name_pre = IWL9260_FW_PRE,
	IWL_DEVICE_9000,
 };

 const struct iwl_cfg iwl9270_2ac_cfg = {
	.name = "Intel(R) Dual Band Wireless AC 9270",
	.fw_name_pre = IWL9260A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
	.fw_name_pre = IWL9260_FW_PRE,
	IWL_DEVICE_9000,
 };

 const struct iwl_cfg iwl9460_2ac_cfg = {
	.name = "Intel(R) Dual Band Wireless AC 9460",
	.fw_name_pre = IWL9260A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
	.fw_name_pre = IWL9260_FW_PRE,
	IWL_DEVICE_9000,
 };

 const struct iwl_cfg iwl9460_2ac_cfg_soc = {
	.name = "Intel(R) Dual Band Wireless AC 9460",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
 };

 const struct iwl_cfg iwl9461_2ac_cfg_soc = {
	.name = "Intel(R) Dual Band Wireless AC 9461",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
	.name = "Intel(R) Dual Band Wireless AC 9461",
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
 };

 const struct iwl_cfg iwl9462_2ac_cfg_soc = {
	.name = "Intel(R) Dual Band Wireless AC 9462",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
	.name = "Intel(R) Dual Band Wireless AC 9462",
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
 };

 const struct iwl_cfg iwl9560_2ac_cfg = {
	.name = "Intel(R) Dual Band Wireless AC 9560",
	.fw_name_pre = IWL9260A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
	.fw_name_pre = IWL9260_FW_PRE,
	IWL_DEVICE_9000,
 };

 const struct iwl_cfg iwl9560_2ac_cfg_soc = {
	.name = "Intel(R) Dual Band Wireless AC 9560",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
@@ -244,9 +221,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {

 const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
	.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
@@ -254,9 +229,7 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {

 const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
	.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
@@ -264,9 +237,7 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {

 const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
	.name = "Intel(R) Dual Band Wireless AC 9460",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
@@ -275,9 +246,7 @@ const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {

 const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
	.name = "Intel(R) Dual Band Wireless AC 9461",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
@@ -286,9 +255,7 @@ const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {

 const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
	.name = "Intel(R) Dual Band Wireless AC 9462",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
@@ -297,9 +264,7 @@ const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {

 const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
	.name = "Intel(R) Dual Band Wireless AC 9560",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
@@ -308,9 +273,7 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {

 const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
	.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
@@ -319,17 +282,12 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {

 const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
	.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
	.fw_name_pre = IWL9000A_FW_PRE,
	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
	.fw_name_pre = IWL9000_FW_PRE,
	IWL_DEVICE_9000,
	.integrated = true,
	.soc_latency = 5000,
	.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
 };

 MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
 MODULE_FIRMWARE(IWL9260_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));

@@ -1054,7 +1054,7 @@ static void iwl_bg_restart(struct work_struct *data)
			ieee80211_restart_hw(priv->hw);
		else
			IWL_ERR(priv,
				"Cannot request restart before registrating with mac80211\n");
				"Cannot request restart before registering with mac80211\n");
	} else {
		WARN_ON(1);
	}

@@ -415,7 +415,11 @@ enum iwl_legacy_cmds {
	TX_ANT_CONFIGURATION_CMD = 0x98,

	/**
	 * @STATISTICS_CMD: &struct iwl_statistics_cmd
	 * @STATISTICS_CMD:
	 * one of &struct iwl_statistics_cmd,
	 * &struct iwl_notif_statistics_v11,
	 * &struct iwl_notif_statistics_v10,
	 * &struct iwl_notif_statistics
	 */
	STATISTICS_CMD = 0x9c,

@@ -423,7 +427,7 @@ enum iwl_legacy_cmds {
	 * @STATISTICS_NOTIFICATION:
	 * one of &struct iwl_notif_statistics_v10,
	 * &struct iwl_notif_statistics_v11,
	 * &struct iwl_notif_statistics_cdb
	 * &struct iwl_notif_statistics
	 */
	STATISTICS_NOTIFICATION = 0x9d,

@@ -224,8 +224,18 @@ struct iwl_wowlan_pattern {

 #define IWL_WOWLAN_MAX_PATTERNS	20

 /**
  * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
  */
 struct iwl_wowlan_patterns_cmd {
	/**
	 * @n_patterns: number of patterns
	 */
	__le32 n_patterns;

	/**
	 * @patterns: the patterns, array length in @n_patterns
	 */
	struct iwl_wowlan_pattern patterns[];
 } __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */

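The command now documented above ends in a flexible array member. A minimal sketch of how such a variable-length command is typically sized and filled is shown below; example_build_patterns_cmd is a hypothetical helper, not a function from the patch.

```c
/*
 * Illustration only: allocating a command that ends in a flexible array,
 * such as struct iwl_wowlan_patterns_cmd.  struct_size() computes the
 * header plus n trailing elements without integer overflow.
 */
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

static struct iwl_wowlan_patterns_cmd *
example_build_patterns_cmd(const struct iwl_wowlan_pattern *src, u32 n)
{
	struct iwl_wowlan_patterns_cmd *cmd;

	cmd = kzalloc(struct_size(cmd, patterns, n), GFP_KERNEL);
	if (!cmd)
		return NULL;

	cmd->n_patterns = cpu_to_le32(n);
	memcpy(cmd->patterns, src, n * sizeof(*src));
	return cmd;
}
```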
@@ -209,8 +209,6 @@ enum iwl_rx_phy_flags {
 * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
 * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask
 * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift
 * @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status
 * @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2
 */
 enum iwl_mvm_rx_status {
	RX_MPDU_RES_STATUS_CRC_OK = BIT(0),
@@ -238,8 +236,6 @@ enum iwl_mvm_rx_status {
	RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
	RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24,
	RX_MPDU_RES_STATUS_STA_ID_MSK = 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT,
	RX_MPDU_RES_STATUS_FILTERING_MSK = (0xc00000),
	RX_MPDU_RES_STATUS2_FILTERING_MSK = (0xc0000000),
 };

 /* 9000 series API */

@@ -8,6 +8,7 @@
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright (C) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -29,6 +30,7 @@
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright (C) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -363,14 +365,7 @@ struct mvm_statistics_general_v8 {
	u8 reserved[4 - (NUM_MAC_INDEX % 4)];
 } __packed; /* STATISTICS_GENERAL_API_S_VER_8 */

 struct mvm_statistics_general_cdb_v9 {
	struct mvm_statistics_general_common_v19 common;
	__le32 beacon_counter[NUM_MAC_INDEX_CDB];
	u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
	u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
 } __packed; /* STATISTICS_GENERAL_API_S_VER_9 */

 struct mvm_statistics_general_cdb {
 struct mvm_statistics_general {
	struct mvm_statistics_general_common common;
	__le32 beacon_counter[MAC_INDEX_AUX];
	u8 beacon_average_energy[MAC_INDEX_AUX];
@@ -435,11 +430,11 @@ struct iwl_notif_statistics_v11 {
	struct mvm_statistics_load_v1 load_stats;
 } __packed; /* STATISTICS_NTFY_API_S_VER_11 */

 struct iwl_notif_statistics_cdb {
 struct iwl_notif_statistics {
	__le32 flag;
	struct mvm_statistics_rx rx;
	struct mvm_statistics_tx tx;
	struct mvm_statistics_general_cdb general;
	struct mvm_statistics_general general;
	struct mvm_statistics_load load_stats;
 } __packed; /* STATISTICS_NTFY_API_S_VER_13 */

@@ -469,6 +469,93 @@ static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
	{ .start = 0x00a02400, .end = 0x00a02758 },
 };

 static const struct iwl_prph_range iwl_prph_dump_addr_22000[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a00034 },
	{ .start = 0x00a0003c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01050 },
	{ .start = 0x00a01058, .end = 0x00a01058 },
	{ .start = 0x00a01060, .end = 0x00a01070 },
	{ .start = 0x00a0108c, .end = 0x00a0108c },
	{ .start = 0x00a01c20, .end = 0x00a01c28 },
	{ .start = 0x00a01d10, .end = 0x00a01d10 },
	{ .start = 0x00a01e28, .end = 0x00a01e2c },
	{ .start = 0x00a01e60, .end = 0x00a01e60 },
	{ .start = 0x00a01e80, .end = 0x00a01e80 },
	{ .start = 0x00a01ea0, .end = 0x00a01ea0 },
	{ .start = 0x00a02000, .end = 0x00a0201c },
	{ .start = 0x00a02024, .end = 0x00a02024 },
	{ .start = 0x00a02040, .end = 0x00a02048 },
	{ .start = 0x00a020c0, .end = 0x00a020e0 },
	{ .start = 0x00a02400, .end = 0x00a02404 },
	{ .start = 0x00a0240c, .end = 0x00a02414 },
	{ .start = 0x00a0241c, .end = 0x00a0243c },
	{ .start = 0x00a02448, .end = 0x00a024bc },
	{ .start = 0x00a024c4, .end = 0x00a024cc },
	{ .start = 0x00a02508, .end = 0x00a02508 },
	{ .start = 0x00a02510, .end = 0x00a02514 },
	{ .start = 0x00a0251c, .end = 0x00a0251c },
	{ .start = 0x00a0252c, .end = 0x00a0255c },
	{ .start = 0x00a02564, .end = 0x00a025a0 },
	{ .start = 0x00a025a8, .end = 0x00a025b4 },
	{ .start = 0x00a025c0, .end = 0x00a025c0 },
	{ .start = 0x00a025e8, .end = 0x00a025f4 },
	{ .start = 0x00a02c08, .end = 0x00a02c18 },
	{ .start = 0x00a02c2c, .end = 0x00a02c38 },
	{ .start = 0x00a02c68, .end = 0x00a02c78 },
	{ .start = 0x00a03000, .end = 0x00a03000 },
	{ .start = 0x00a03010, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03044 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03070 },
	{ .start = 0x00a0307c, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030c0 },
	{ .start = 0x00a030c8, .end = 0x00a030f4 },
	{ .start = 0x00a03100, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a04560 },
	{ .start = 0x00a04570, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04590 },
	{ .start = 0x00a04598, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
	{ .start = 0x00a0c000, .end = 0x00a0c018 },
	{ .start = 0x00a0c020, .end = 0x00a0c028 },
	{ .start = 0x00a0c038, .end = 0x00a0c094 },
	{ .start = 0x00a0c0c0, .end = 0x00a0c104 },
	{ .start = 0x00a0c10c, .end = 0x00a0c118 },
	{ .start = 0x00a0c150, .end = 0x00a0c174 },
	{ .start = 0x00a0c17c, .end = 0x00a0c188 },
	{ .start = 0x00a0c190, .end = 0x00a0c198 },
	{ .start = 0x00a0c1a0, .end = 0x00a0c1a8 },
	{ .start = 0x00a0c1b0, .end = 0x00a0c1b8 },
 };

 static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
				u32 len_bytes, __le32 *data)
 {
@@ -478,15 +565,20 @@ static void iwl_read_prph_block(struct iwl_trans *trans, u32 start,
		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
 }

 static void iwl_dump_prph(struct iwl_trans *trans,
			  struct iwl_fw_error_dump_data **data,
 static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
			  const struct iwl_prph_range *iwl_prph_dump_addr,
			  u32 range_len)
			  u32 range_len, void *ptr)
 {
	struct iwl_fw_error_dump_prph *prph;
	struct iwl_trans *trans = fwrt->trans;
	struct iwl_fw_error_dump_data **data =
		(struct iwl_fw_error_dump_data **)ptr;
	unsigned long flags;
	u32 i;

	if (!data)
		return;

	IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");

	if (!iwl_trans_grab_nic_access(trans, &flags))
@@ -552,37 +644,47 @@ static struct scatterlist *alloc_sgtable(int size)
	return table;
 }

 static int iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt)
 static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
				const struct iwl_prph_range *iwl_prph_dump_addr,
				u32 range_len, void *ptr)
 {
	u32 prph_len = 0;
	int i;
	u32 *prph_len = (u32 *)ptr;
	int i, num_bytes_in_chunk;

	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
	     i++) {
	if (!prph_len)
		return;

	for (i = 0; i < range_len; i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk =
			iwl_prph_dump_addr_comm[i].end -
			iwl_prph_dump_addr_comm[i].start + 4;
		num_bytes_in_chunk =
			iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		prph_len += sizeof(struct iwl_fw_error_dump_data) +
		*prph_len += sizeof(struct iwl_fw_error_dump_data) +
			sizeof(struct iwl_fw_error_dump_prph) +
			num_bytes_in_chunk;
	}
 }

	if (fwrt->trans->cfg->mq_rx_supported) {
		for (i = 0; i <
			ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
			/* The range includes both boundaries */
			int num_bytes_in_chunk =
				iwl_prph_dump_addr_9000[i].end -
				iwl_prph_dump_addr_9000[i].start + 4;
 static void iwl_fw_prph_handler(struct iwl_fw_runtime *fwrt, void *ptr,
				void (*handler)(struct iwl_fw_runtime *,
						const struct iwl_prph_range *,
						u32, void *))
 {
	u32 range_len;

			prph_len += sizeof(struct iwl_fw_error_dump_data) +
				sizeof(struct iwl_fw_error_dump_prph) +
				num_bytes_in_chunk;
	if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
		range_len = ARRAY_SIZE(iwl_prph_dump_addr_22000);
		handler(fwrt, iwl_prph_dump_addr_22000, range_len, ptr);
	} else {
		range_len = ARRAY_SIZE(iwl_prph_dump_addr_comm);
		handler(fwrt, iwl_prph_dump_addr_comm, range_len, ptr);

		if (fwrt->trans->cfg->mq_rx_supported) {
			range_len = ARRAY_SIZE(iwl_prph_dump_addr_9000);
			handler(fwrt, iwl_prph_dump_addr_9000, range_len, ptr);
		}
	}
	return prph_len;
 }

 static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
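The refactor above turns the PRPH code into a single table walker, iwl_fw_prph_handler(), driven by two callbacks: one pass computes the required buffer length, a second pass reads and stores the registers. A condensed sketch of the calling pattern is shown below; the buffer allocation in between is elided and example_collect_prph is a hypothetical wrapper, not a function from the patch.

```c
/* Sketch only: how the two callbacks are driven by iwl_fw_prph_handler(). */
static void example_collect_prph(struct iwl_fw_runtime *fwrt,
				 struct iwl_fw_error_dump_data **data)
{
	u32 prph_len = 0;

	/* Pass 1: walk the matching address tables and accumulate the size. */
	iwl_fw_prph_handler(fwrt, &prph_len, iwl_fw_get_prph_len);
	if (!prph_len)
		return;

	/* ... size/advance the dump buffer using prph_len ... */

	/* Pass 2: walk the same tables again and copy the register contents. */
	iwl_fw_prph_handler(fwrt, &data, iwl_dump_prph);
}
```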
@@ -646,6 +748,9 @@ static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
	ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);

	/* Count RXF1 sizes */
	if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
		mem_cfg->num_lmacs = MAX_NUM_LMAC;

	for (i = 0; i < mem_cfg->num_lmacs; i++)
		ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);

@@ -664,6 +769,9 @@ static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
		goto dump_internal_txf;

	/* Count TXF sizes */
	if (WARN_ON(mem_cfg->num_lmacs > MAX_NUM_LMAC))
		mem_cfg->num_lmacs = MAX_NUM_LMAC;

	for (i = 0; i < mem_cfg->num_lmacs; i++) {
		int j;

@@ -733,6 +841,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
		const struct fw_img *img;

		if (fwrt->cur_fw_img >= IWL_UCODE_TYPE_MAX)
			return NULL;
		img = &fwrt->fw->img[fwrt->cur_fw_img];
		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
@@ -747,9 +857,9 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
		fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);

		/* Make room for PRPH registers */
		if (!fwrt->trans->cfg->gen2 &&
		    iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
			prph_len += iwl_fw_get_prph_len(fwrt);
		if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
			iwl_fw_prph_handler(fwrt, &prph_len,
					    iwl_fw_get_prph_len);

	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
	    iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
@@ -828,7 +938,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
			sizeof(dump_info->dev_human_readable) - 1);
		strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
			sizeof(dump_info->bus_human_readable) - 1);
		dump_info->rt_status = cpu_to_le32(fwrt->dump.rt_status);
		dump_info->num_of_lmacs = fwrt->smem_cfg.num_lmacs;
		dump_info->lmac_err_id[0] =
			cpu_to_le32(fwrt->dump.lmac_err_id[0]);
		if (fwrt->smem_cfg.num_lmacs > 1)
			dump_info->lmac_err_id[1] =
				cpu_to_le32(fwrt->dump.lmac_err_id[1]);
		dump_info->umac_err_id = cpu_to_le32(fwrt->dump.umac_err_id);

		dump_data = iwl_fw_error_next_data(dump_data);
	}
@@ -935,16 +1051,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
	if (iwl_fw_dbg_is_paging_enabled(fwrt))
		iwl_dump_paging(fwrt, &dump_data);

	if (prph_len) {
		iwl_dump_prph(fwrt->trans, &dump_data,
			      iwl_prph_dump_addr_comm,
			      ARRAY_SIZE(iwl_prph_dump_addr_comm));

		if (fwrt->trans->cfg->mq_rx_supported)
			iwl_dump_prph(fwrt->trans, &dump_data,
				      iwl_prph_dump_addr_9000,
				      ARRAY_SIZE(iwl_prph_dump_addr_9000));
	}
	if (prph_len)
		iwl_fw_prph_handler(fwrt, &dump_data, iwl_dump_prph);

 out:
	dump_file->file_len = cpu_to_le32(file_len);

@@ -102,7 +102,10 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
	if (fwrt->dump.desc != &iwl_dump_desc_assert)
		kfree(fwrt->dump.desc);
	fwrt->dump.desc = NULL;
	fwrt->dump.rt_status = 0;
	fwrt->dump.lmac_err_id[0] = 0;
	if (fwrt->smem_cfg.num_lmacs > 1)
		fwrt->dump.lmac_err_id[1] = 0;
	fwrt->dump.umac_err_id = 0;
 }

 void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);

@@ -180,6 +180,8 @@ enum iwl_fw_error_dump_family {
	IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
 };

 #define MAX_NUM_LMAC 2

 /**
  * struct iwl_fw_error_dump_info - info on the device / firmware
  * @device_family: the family of the device (7 / 8)
@@ -187,7 +189,10 @@ enum iwl_fw_error_dump_family {
  * @fw_human_readable: human readable FW version
  * @dev_human_readable: name of the device
  * @bus_human_readable: name of the bus used
  * @rt_status: the error_id/rt_status that that triggered the latest dump
  * @num_of_lmacs: the number of lmacs
  * @lmac_err_id: the lmac 0/1 error_id/rt_status that triggered the latest dump
  *	if the dump collection was not initiated by an assert, the value is 0
  * @umac_err_id: the umac error_id/rt_status that triggered the latest dump
  *	if the dump collection was not initiated by an assert, the value is 0
  */
 struct iwl_fw_error_dump_info {
@@ -196,7 +201,9 @@ struct iwl_fw_error_dump_info {
	u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
	u8 dev_human_readable[64];
	u8 bus_human_readable[8];
	__le32 rt_status;
	u8 num_of_lmacs;
	__le32 umac_err_id;
	__le32 lmac_err_id[MAX_NUM_LMAC];
 } __packed;

 /**

@@ -142,7 +142,8 @@ struct iwl_fw_runtime {
		u32 *d3_debug_data;
		struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID];
		struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
		u32 rt_status;
		u32 lmac_err_id[MAX_NUM_LMAC];
		u32 umac_err_id;
	} dump;
 #ifdef CONFIG_IWLWIFI_DEBUGFS
	struct {

@@ -335,10 +335,6 @@ struct iwl_csr_params {
 * @fw_name_pre: Firmware filename prefix. The api version and extension
 *	(.ucode) will be added to filename before loading from disk. The
 *	filename is constructed as fw_name_pre<api>.ucode.
 * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
 *	(if supported)
 * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
 *	next step. Supported only in integrated solutions.
 * @ucode_api_max: Highest version of uCode API supported by driver.
 * @ucode_api_min: Lowest version of uCode API supported by driver.
 * @max_inst_size: The maximal length of the fw inst section (only DVM)
@@ -392,8 +388,6 @@ struct iwl_cfg {
	/* params specific to an individual device within a device family */
	const char *name;
	const char *fw_name_pre;
	const char *fw_name_pre_b_or_c_step;
	const char *fw_name_pre_rf_next_step;
	/* params not likely to change within a device family */
	const struct iwl_base_params *base_params;
	/* params likely to change within a device family */
@@ -570,7 +564,13 @@ extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
 extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
 extern const struct iwl_cfg iwl22560_2ax_cfg_hr;
 extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
 extern const struct iwl_cfg iwl22260_2ax_cfg;
 extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
 extern const struct iwl_cfg killer1650x_2ax_cfg;
 extern const struct iwl_cfg killer1650w_2ax_cfg;
 extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
 extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
 extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;

@@ -325,6 +325,7 @@ enum {
 #define CSR_HW_REV_TYPE_7265D		(0x0000210)
 #define CSR_HW_REV_TYPE_NONE		(0x00001F0)
 #define CSR_HW_REV_TYPE_QNJ		(0x0000360)
 #define CSR_HW_REV_TYPE_QNJ_B0		(0x0000364)
 #define CSR_HW_REV_TYPE_HR_CDB		(0x0000340)

 /* RF_ID value */

@@ -210,18 +210,15 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
 {
	const struct iwl_cfg *cfg = drv->trans->cfg;
	char tag[8];
	const char *fw_pre_name;

	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
	    (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
	     CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
		fw_pre_name = cfg->fw_name_pre_b_or_c_step;
	else if (drv->trans->cfg->integrated &&
		 CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
		 cfg->fw_name_pre_rf_next_step)
		fw_pre_name = cfg->fw_name_pre_rf_next_step;
	else
		fw_pre_name = cfg->fw_name_pre;
	    (CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_B_STEP &&
	     CSR_HW_REV_STEP(drv->trans->hw_rev) != SILICON_C_STEP)) {
		IWL_ERR(drv,
			"Only HW steps B and C are currently supported (0x%0x)\n",
			drv->trans->hw_rev);
		return -EINVAL;
	}

	if (first) {
		drv->fw_index = cfg->ucode_api_max;
@@ -235,15 +232,13 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
		IWL_ERR(drv, "no suitable firmware found!\n");

		if (cfg->ucode_api_min == cfg->ucode_api_max) {
			IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
			IWL_ERR(drv, "%s%d is required\n", cfg->fw_name_pre,
				cfg->ucode_api_max);
		} else {
			IWL_ERR(drv, "minimum version required: %s%d\n",
				fw_pre_name,
				cfg->ucode_api_min);
				cfg->fw_name_pre, cfg->ucode_api_min);
			IWL_ERR(drv, "maximum version supported: %s%d\n",
				fw_pre_name,
				cfg->ucode_api_max);
				cfg->fw_name_pre, cfg->ucode_api_max);
		}

		IWL_ERR(drv,
@@ -252,7 +247,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
	}

	snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
		 fw_pre_name, tag);
		 cfg->fw_name_pre, tag);

	IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
		       drv->firmware_name);

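With the b/c-step and rf-next-step prefixes removed, the requested firmware name is always derived from cfg->fw_name_pre. A reduced sketch of the naming logic is given below; example_fw_name is a hypothetical helper, and the api-version retry loop and error handling from the driver are omitted.

```c
/* Sketch only: the firmware file name is now built from a single prefix. */
#include <linux/kernel.h>

static void example_fw_name(const struct iwl_cfg *cfg, int api,
			    char *buf, size_t len)
{
	/* e.g. "iwlwifi-9000-pu-b0-jf-b0-" + "46" + ".ucode" */
	snprintf(buf, len, "%s%d.ucode", cfg->fw_name_pre, api);
}
```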
@@ -569,8 +569,7 @@ static struct ieee80211_sband_iftype_data iwl_he_capa[] = {
			.has_he = true,
			.he_cap_elem = {
				.mac_cap_info[0] =
					IEEE80211_HE_MAC_CAP0_HTC_HE |
					IEEE80211_HE_MAC_CAP0_TWT_RES,
					IEEE80211_HE_MAC_CAP0_HTC_HE,
				.mac_cap_info[1] =
					IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
					IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
@@ -1196,14 +1195,12 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
	regd_to_copy = sizeof(struct ieee80211_regdomain) +
		valid_rules * sizeof(struct ieee80211_reg_rule);

	copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
	copy_rd = kmemdup(regd, regd_to_copy, GFP_KERNEL);
	if (!copy_rd) {
		copy_rd = ERR_PTR(-ENOMEM);
		goto out;
	}

	memcpy(copy_rd, regd, regd_to_copy);

 out:
	kfree(regdb_ptrs);
	kfree(regd);

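The regulatory-domain copy above replaces a kzalloc() followed by memcpy() with a single kmemdup(), which is the idiomatic form for "allocate and copy". The two shapes in isolation, as a minimal sketch with hypothetical helper names:

```c
#include <linux/slab.h>
#include <linux/string.h>

/* Old pattern: zeroed allocation followed by an explicit copy. */
static void *example_dup_old(const void *src, size_t len)
{
	void *p = kzalloc(len, GFP_KERNEL);

	if (p)
		memcpy(p, src, len);
	return p;
}

/* New pattern: kmemdup() allocates and copies in one call. */
static void *example_dup_new(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
```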
@@ -2125,7 +2125,6 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)

	file->private_data = inode->i_private;

	ieee80211_stop_queues(mvm->hw);
	synchronize_net();

	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
@@ -2140,10 +2139,9 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
	rtnl_unlock();
	if (err > 0)
		err = -EINVAL;
	if (err) {
		ieee80211_wake_queues(mvm->hw);
	if (err)
		return err;
	}

	mvm->d3_test_active = true;
	mvm->keep_vif = NULL;
	return 0;
@@ -2223,8 +2221,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);

	ieee80211_wake_queues(mvm->hw);

	return 0;
 }

@@ -295,7 +295,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	int ret;
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { MVM_ALIVE };

@@ -373,9 +373,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
 #ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_fw_set_dbg_rec_on(&mvm->fwrt);

|
@ -97,11 +97,6 @@ struct iwl_mvm_mac_iface_iterator_data {
|
|||
bool found_vif;
|
||||
};
|
||||
|
||||
struct iwl_mvm_hw_queues_iface_iterator_data {
|
||||
struct ieee80211_vif *exclude_vif;
|
||||
unsigned long used_hw_queues;
|
||||
};
|
||||
|
||||
static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
|
@ -208,61 +203,6 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
|
|||
data->preferred_tsf = NUM_TSF_IDS;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the mask of the queues used by the vif
|
||||
*/
|
||||
u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
|
||||
{
|
||||
u32 qmask = 0, ac;
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
|
||||
return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
|
||||
|
||||
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
|
||||
if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
|
||||
qmask |= BIT(vif->hw_queue[ac]);
|
||||
}
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_AP ||
|
||||
vif->type == NL80211_IFTYPE_ADHOC)
|
||||
qmask |= BIT(vif->cab_queue);
|
||||
|
||||
return qmask;
|
||||
}
|
||||
|
||||
static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
|
||||
|
||||
/* exclude the given vif */
|
||||
if (vif == data->exclude_vif)
|
||||
return;
|
||||
|
||||
data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
|
||||
}
|
||||
|
||||
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *exclude_vif)
|
||||
{
|
||||
struct iwl_mvm_hw_queues_iface_iterator_data data = {
|
||||
.exclude_vif = exclude_vif,
|
||||
.used_hw_queues =
|
||||
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
|
||||
BIT(mvm->aux_queue) |
|
||||
BIT(IWL_MVM_DQA_GCAST_QUEUE),
|
||||
};
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
/* mark all VIF used hw queues */
|
||||
ieee80211_iterate_active_interfaces_atomic(
|
||||
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
iwl_mvm_iface_hw_queues_iter, &data);
|
||||
|
||||
return data.used_hw_queues;
|
||||
}
|
||||
|
||||
static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
|
@ -360,8 +300,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
|
||||
iwl_mvm_mac_iface_iterator, &data);
|
||||
|
||||
used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
|
||||
|
||||
/*
|
||||
* In the case we're getting here during resume, it's similar to
|
||||
* firmware restart, and with RESUME_ALL the iterator will find
|
||||
|
@ -416,9 +354,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
* the ones here - no real limit
|
||||
*/
|
||||
queue_limit = IEEE80211_MAX_QUEUES;
|
||||
BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
|
||||
BITS_PER_BYTE *
|
||||
sizeof(mvm->hw_queue_to_mac80211[0]));
|
||||
|
||||
/*
|
||||
* Find available queues, and allocate them to the ACs. When in
|
||||
|
@ -446,9 +381,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
* queue value (when queue is enabled).
|
||||
*/
|
||||
mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
|
||||
vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
|
||||
} else {
|
||||
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
|
||||
}
|
||||
|
||||
mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
|
||||
|
@ -462,8 +394,6 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
exit_fail:
|
||||
memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
|
||||
memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
|
||||
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -778,27 +708,9 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
|
|||
|
||||
if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
|
||||
!iwlwifi_mod_params.disable_11ax) {
|
||||
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
|
||||
u8 sta_id = mvmvif->ap_sta_id;
|
||||
|
||||
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
|
||||
if (sta_id != IWL_MVM_INVALID_STA) {
|
||||
struct ieee80211_sta *sta;
|
||||
|
||||
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
|
||||
lockdep_is_held(&mvm->mutex));
|
||||
|
||||
/*
|
||||
* TODO: we should check the ext cap IE but it is
|
||||
* unclear why the spec requires two bits (one in HE
|
||||
* cap IE, and one in the ext cap IE). In the meantime
|
||||
* rely on the HE cap IE only.
|
||||
*/
|
||||
if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] &
|
||||
IEEE80211_HE_MAC_CAP0_TWT_RES))
|
||||
ctxt_sta->data_policy |=
|
||||
cpu_to_le32(TWT_SUPPORTED);
|
||||
}
|
||||
if (vif->bss_conf.twt_requester)
|
||||
ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
|
||||
}
|
||||
|
||||
|
||||
|
@ -881,8 +793,6 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
|
|||
|
||||
iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
|
||||
|
||||
cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
|
||||
|
||||
/* Override the filter flags to accept only probe requests */
|
||||
cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
|
||||
|
||||
|
@ -1203,7 +1113,7 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
|
|||
|
||||
if (!fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_STA_TYPE))
|
||||
ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
|
||||
ctxt_ap->mcast_qid = cpu_to_le32(mvmvif->cab_queue);
|
||||
|
||||
/*
|
||||
* Only set the beacon time when the MAC is being added, when we
|
||||
|
|
|
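With this change the MAC context's data_policy is driven directly by mac80211's vif->bss_conf.twt_requester flag, instead of peeking at the AP station's HE capability element. A condensed sketch of the resulting decision, stripped of the surrounding command construction (example_set_twt_policy is a hypothetical helper for illustration):

```c
/* Sketch only: TWT support is now taken from bss_conf, filled in by mac80211. */
static void example_set_twt_policy(struct ieee80211_vif *vif,
				   struct iwl_mac_data_sta *ctxt_sta)
{
	if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
	    !iwlwifi_mod_params.disable_11ax &&
	    vif->bss_conf.twt_requester)
		ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
}
```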
@@ -395,6 +395,21 @@ int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
	return ret;
 }

 const static u8 he_if_types_ext_capa_sta[] = {
	 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
	 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
	 [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
 };

 const static struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
	{
		.iftype = NL80211_IFTYPE_STATION,
		.extended_capabilities = he_if_types_ext_capa_sta,
		.extended_capabilities_mask = he_if_types_ext_capa_sta,
		.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
	},
 };

 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
	struct ieee80211_hw *hw = mvm->hw;
@@ -410,7 +425,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, QUEUE_CONTROL);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
@@ -424,6 +438,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);

	if (iwl_mvm_has_tlc_offload(mvm)) {
		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
@@ -469,6 +487,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)

	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
	hw->max_tx_fragments = mvm->trans->max_skb_frags;

	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
@@ -534,6 +553,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
	hw->chanctx_data_size = sizeof(u16);
	hw->txq_data_size = sizeof(struct iwl_mvm_txq);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -673,6 +693,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
			NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
	}

	if (mvm->nvm_data->sku_cap_11ax_enable &&
	    !iwlwifi_mod_params.disable_11ax) {
		hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
		hw->wiphy->num_iftype_ext_capab =
			ARRAY_SIZE(he_iftypes_ext_capa);
	}

	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

 #ifdef CONFIG_PM_SLEEP
@@ -776,7 +803,6 @@ static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
		goto out;

	__skb_queue_tail(&mvm->d0i3_tx, skb);
	ieee80211_stop_queues(mvm->hw);

	/* trigger wakeup */
	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
@@ -796,13 +822,15 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
	struct ieee80211_sta *sta = control->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
		IEEE80211_TX_CTL_TX_OFFCHAN;

	if (iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
		goto drop;
	}

	if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
	if (offchannel &&
	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
		goto drop;
@@ -815,8 +843,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
		sta = NULL;

	/* If there is no sta, and it's not offchannel - send through AP */
	if (info->control.vif->type == NL80211_IFTYPE_STATION &&
	    info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
	if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
	    !offchannel) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info->control.vif);
		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
@@ -844,6 +872,77 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
	ieee80211_free_txskb(hw, skb);
 }

 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 {
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
	struct sk_buff *skb = NULL;

	spin_lock(&mvmtxq->tx_path_lock);

	rcu_read_lock();
	while (likely(!mvmtxq->stopped &&
		      (mvm->trans->system_pm_mode ==
		       IWL_PLAT_PM_MODE_DISABLED))) {
		skb = ieee80211_tx_dequeue(hw, txq);

		if (!skb)
			break;

		if (!txq->sta)
			iwl_mvm_tx_skb_non_sta(mvm, skb);
		else
			iwl_mvm_tx_skb(mvm, skb, txq->sta);
	}
	rcu_read_unlock();

	spin_unlock(&mvmtxq->tx_path_lock);
 }

 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
				      struct ieee80211_txq *txq)
 {
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);

	/*
	 * Please note that racing is handled very carefully here:
	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
	 * deleted afterwards.
	 * This means that if:
	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
	 *	queue is allocated and we can TX.
	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
	 *	a race, should defer the frame.
	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
	 *	need to allocate the queue and defer the frame.
	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
	 *	queue is already scheduled for allocation, no need to allocate,
	 *	should defer the frame.
	 */

	/* If the queue is allocated TX and return. */
	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
		/*
		 * Check that list is empty to avoid a race where txq_id is
		 * already updated, but the queue allocation work wasn't
		 * finished
		 */
		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
			return;

		iwl_mvm_mac_itxq_xmit(hw, txq);
		return;
	}

	/* The list is being deleted only after the queue is fully allocated. */
	if (!list_empty(&mvmtxq->list))
		return;

	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
	schedule_work(&mvm->add_stream_wk);
 }

 static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
 {
	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
@@ -1085,7 +1184,6 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)

	iwl_mvm_reset_phy_ctxts(mvm);
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));

@@ -2861,32 +2959,6 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
					peer_addr, action);
 }

 static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
					     struct iwl_mvm_sta *mvm_sta)
 {
	struct iwl_mvm_tid_data *tid_data;
	struct sk_buff *skb;
	int i;

	spin_lock_bh(&mvm_sta->lock);
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		tid_data = &mvm_sta->tid_data[i];

		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

			/*
			 * The first deferred frame should've stopped the MAC
			 * queues, so we should never get a second deferred
			 * frame for the RA/TID.
			 */
			iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
			ieee80211_free_txskb(mvm->hw, skb);
		}
	}
	spin_unlock_bh(&mvm_sta->lock);
 }

 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
@@ -2920,7 +2992,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
	 */
	if (old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
		flush_work(&mvm->add_stream_wk);

		/*
@@ -2967,6 +3038,8 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
						   NL80211_TDLS_SETUP);
		}

		sta->max_rc_amsdu_len = 1;
	} else if (old_state == IEEE80211_STA_NONE &&
		   new_state == IEEE80211_STA_AUTH) {
		/*
@@ -4656,8 +4729,35 @@ static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
	mutex_unlock(&mvm->mutex);
 }

 static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
 {
	u8 protocol = ip_hdr(skb)->protocol;

	if (!IS_ENABLED(CONFIG_INET))
		return false;

	return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP;
 }

 static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw,
				      struct sk_buff *head,
				      struct sk_buff *skb)
 {
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* For now don't aggregate IPv6 in AMSDU */
	if (skb->protocol != htons(ETH_P_IP))
		return false;

	if (!iwl_mvm_is_csum_supported(mvm))
		return true;

	return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head);
 }

 const struct ieee80211_ops iwl_mvm_hw_ops = {
	.tx = iwl_mvm_mac_tx,
	.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
	.ampdu_action = iwl_mvm_mac_ampdu_action,
	.start = iwl_mvm_mac_start,
	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
@@ -4731,6 +4831,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
 #endif
	.get_survey = iwl_mvm_mac_get_survey,
	.sta_statistics = iwl_mvm_mac_sta_statistics,
	.can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
	.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
 #endif

|
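For context on the struct iwl_mvm_txq / drv_priv plumbing that the mvm.h hunk below introduces: mac80211 reserves hw->txq_data_size bytes of driver-private storage behind every ieee80211_txq, and the driver casts txq->drv_priv to its own per-queue struct. A hedged, driver-agnostic sketch (the foo_* names are made up, only the mac80211 fields and calls are real):

#include <net/mac80211.h>

struct foo_txq {			/* per-TXQ driver state, like iwl_mvm_txq */
	struct list_head list;
	u16 hw_queue;
};

static int foo_register(struct ieee80211_hw *hw)
{
	/* mac80211 allocates this many bytes behind each txq->drv_priv */
	hw->txq_data_size = sizeof(struct foo_txq);
	return ieee80211_register_hw(hw);
}

static void foo_wake_tx_queue(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct foo_txq *ftxq = (void *)txq->drv_priv;
	struct sk_buff *skb;

	/* pull the frames mac80211 queued for this station/TID pair */
	while ((skb = ieee80211_tx_dequeue(hw, txq)))
		dev_kfree_skb(skb);	/* a real driver would hand skb to HW */

	(void)ftxq;
}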
@@ -778,6 +778,40 @@ struct iwl_mvm_geo_profile {
u8 values[ACPI_GEO_TABLE_SIZE];
};

struct iwl_mvm_txq {
struct list_head list;
u16 txq_id;
/* Protects TX path invocation from two places */
spinlock_t tx_path_lock;
bool stopped;
};

static inline struct iwl_mvm_txq *
iwl_mvm_txq_from_mac80211(struct ieee80211_txq *txq)
{
return (void *)txq->drv_priv;
}

static inline struct iwl_mvm_txq *
iwl_mvm_txq_from_tid(struct ieee80211_sta *sta, u8 tid)
{
if (tid == IWL_MAX_TID_COUNT)
tid = IEEE80211_NUM_TIDS;

return (void *)sta->txq[tid]->drv_priv;
}

/**
* struct iwl_mvm_tvqm_txq_info - maps TVQM hw queue to tid
*
* @sta_id: sta id
* @txq_tid: txq tid
*/
struct iwl_mvm_tvqm_txq_info {
u8 sta_id;
u8 txq_tid;
};

struct iwl_mvm_dqa_txq_info {
u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
bool reserved; /* Is this the TXQ reserved for a STA */

@@ -843,13 +877,13 @@ struct iwl_mvm {
u64 on_time_scan;
} radio_stats, accu_radio_stats;

u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];

struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
struct list_head add_stream_txqs;
union {
struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
struct iwl_mvm_tvqm_txq_info tvqm_info[IWL_MAX_TVQM_QUEUES];
};
struct work_struct add_stream_wk; /* To add streams to queues */

atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];

const char *nvm_file_name;
struct iwl_nvm_data *nvm_data;
/* NVM sections */

@@ -863,7 +897,6 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
u8 rx_ba_sessions;

/* configured by mac80211 */

@@ -1470,6 +1503,11 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc);
void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
unsigned int tid);

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_mvm_get_tx_fail_reason(u32 status);
#else

@@ -1599,7 +1637,6 @@ int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool force_assoc_off, const u8 *bssid_override);
int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,

@@ -1615,8 +1652,6 @@ void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
struct ieee80211_vif *exclude_vif);
void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,

@@ -1906,10 +1941,6 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
iwl_trans_stop_device(mvm->trans);
}

/* Stop/start all mac queues in a given bitmap */
void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);

/* Re-configure the SCD for a queue that has already been configured */
int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
int tid, int frame_limit, u16 ssn);
@@ -179,7 +179,7 @@ static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
IWL_DEBUG_EEPROM(mvm->trans->dev,
"NVM access command failed with status %d (device: %s)\n",
ret, mvm->cfg->name);
ret = -EIO;
ret = -ENODATA;
}
goto exit;
}

@@ -380,8 +380,12 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
/* we override the constness for initial read */
ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
size_read);
if (ret < 0)
if (ret == -ENODATA) {
ret = 0;
continue;
}
if (ret < 0)
break;
size_read += ret;
temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
if (!temp) {

@@ -454,7 +458,7 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
mvm->nvm_data->nvm_version);

return 0;
return ret < 0 ? ret : 0;
}

struct iwl_mcc_update_resp *
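The iwl_nvm_read_chunk()/iwl_nvm_init() change above turns a missing NVM section into -ENODATA and lets the read loop skip that section instead of aborting. A small, self-contained sketch of that error-handling shape (read_section() is a hypothetical helper, not a driver API):

#include <errno.h>

static int read_section(int section, int offset);	/* hypothetical: returns bytes read,
							 * -ENODATA if the section is absent,
							 * another negative value on real errors */

static int read_all_sections(int num_sections)
{
	int section, size_read = 0, ret = 0;

	for (section = 0; section < num_sections; section++) {
		ret = read_section(section, size_read);
		if (ret == -ENODATA) {	/* section simply not present: skip it */
			ret = 0;
			continue;
		}
		if (ret < 0)		/* real I/O failure: stop */
			break;
		size_read += ret;
	}
	return ret < 0 ? ret : 0;	/* mirrors the final-return change above */
}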
@@ -685,6 +685,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
INIT_LIST_HEAD(&mvm->add_stream_txqs);

spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock);

@@ -1079,24 +1080,6 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_common(mvm, rxb, pkt);
}

void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
int q;

if (WARN_ON_ONCE(!mq))
return;

for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
IWL_DEBUG_TX_QUEUES(mvm,
"mac80211 %d already stopped\n", q);
continue;
}

ieee80211_stop_queue(mvm->hw, q);
}
}

static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
const struct iwl_device_cmd *cmd)
{

@@ -1109,38 +1092,66 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
iwl_trans_block_txq_ptrs(mvm->trans, false);
}

static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
int hw_queue, bool start)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];
struct ieee80211_sta *sta;
struct ieee80211_txq *txq;
struct iwl_mvm_txq *mvmtxq;
int i;
unsigned long tid_bitmap;
struct iwl_mvm_sta *mvmsta;
u8 sta_id;

iwl_mvm_stop_mac_queues(mvm, mq);
}
sta_id = iwl_mvm_has_new_tx_api(mvm) ?
mvm->tvqm_info[hw_queue].sta_id :
mvm->queue_info[hw_queue].ra_sta_id;

void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
{
int q;

if (WARN_ON_ONCE(!mq))
if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
return;

for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
IWL_DEBUG_TX_QUEUES(mvm,
"mac80211 %d still stopped\n", q);
continue;
}
rcu_read_lock();

ieee80211_wake_queue(mvm->hw, q);
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (IS_ERR_OR_NULL(sta))
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);

if (iwl_mvm_has_new_tx_api(mvm)) {
int tid = mvm->tvqm_info[hw_queue].txq_tid;

tid_bitmap = BIT(tid);
} else {
tid_bitmap = mvm->queue_info[hw_queue].tid_bitmap;
}

for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
int tid = i;

if (tid == IWL_MAX_TID_COUNT)
tid = IEEE80211_NUM_TIDS;

txq = sta->txq[tid];
mvmtxq = iwl_mvm_txq_from_mac80211(txq);
mvmtxq->stopped = !start;

if (start && mvmsta->sta_state != IEEE80211_STA_NOTEXIST)
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
}

out:
rcu_read_unlock();
}

static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
iwl_mvm_queue_state_change(op_mode, hw_queue, false);
}

static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];

iwl_mvm_start_mac_queues(mvm, mq);
iwl_mvm_queue_state_change(op_mode, hw_queue, true);
}

static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
@@ -149,14 +149,9 @@ static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,

if (he_cap && he_cap->has_he &&
(he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK)) {
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK))
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK;

if (he_cap->he_cap_elem.phy_cap_info[3] &
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2)
flags |= IWL_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK;
}

return flags;
}

@@ -320,12 +315,26 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,

if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
u16 size = le32_to_cpu(notif->amsdu_size);
int i;

if (WARN_ON(sta->max_amsdu_len < size))
goto out;

mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
mvmsta->max_amsdu_len = size;
sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;

for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled & BIT(i))
sta->max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
sta->max_tid_amsdu_len[i] = 1;
}

IWL_DEBUG_RATE(mvm,
"AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
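Both the rs-fw.c hunk above and the rs.c hunk below apply the same per-TID rule: TIDs with A-MSDU enabled get the computed size cap, the rest get a length of 1, which effectively disables A-MSDU on that TID. A trivial stand-alone sketch of that rule (names and TID count are illustrative, not driver API):

#define NUM_TIDS 8

/* enabled_tids is a bitmap, one bit per TID; cap is the computed size limit */
static void apply_amsdu_caps(unsigned int enabled_tids, unsigned int cap,
			     unsigned int max_len[NUM_TIDS])
{
	int tid;

	for (tid = 0; tid < NUM_TIDS; tid++)
		max_len[tid] = (enabled_tids & (1u << tid)) ? cap : 1;
}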
@@ -1744,6 +1744,7 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum rs_action scale_action)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
int i;

/*
* In case TLC offload is not active amsdu_enabled is either 0xFFFF

@@ -1757,6 +1758,19 @@ static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvmsta->amsdu_enabled = 0xFFFF;

mvmsta->max_amsdu_len = sta->max_amsdu_len;
sta->max_rc_amsdu_len = mvmsta->max_amsdu_len;

for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
if (mvmsta->amsdu_enabled)
sta->max_tid_amsdu_len[i] =
iwl_mvm_max_amsdu_size(mvm, sta, i);
else
/*
* Not so elegant, but this will effectively
* prevent AMSDU on this TID
*/
sta->max_tid_amsdu_len[i] = 1;
}
}

/*

@@ -3332,12 +3346,12 @@ static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
/* Building the rate table is non trivial. When we're in MIMO2/VHT/80Mhz/SGI
* column the rate table should look like this:
*
* rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
* rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
* rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
* rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
* rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[0] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
* rate[1] 0x400F019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
* rate[2] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
* rate[3] 0x400F018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
* rate[4] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[5] 0x400F017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
* rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
* rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
* rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
@@ -599,8 +599,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
if (iwl_mvm_is_cdb_supported(mvm)) {
struct mvm_statistics_general_cdb *general =
if (iwl_mvm_has_new_rx_stats_api(mvm)) {
struct mvm_statistics_general *general =
data->general;

mvmvif->beacon_stats.num_beacons =

@@ -723,7 +723,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
else
expected_size = sizeof(struct iwl_notif_statistics_v10);
} else {
expected_size = sizeof(struct iwl_notif_statistics_cdb);
expected_size = sizeof(struct iwl_notif_statistics);
}

if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,

@@ -753,7 +753,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,

flags = stats->flag;
} else {
struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
struct iwl_notif_statistics *stats = (void *)&pkt->data;

data.mac_id = stats->rx.general.mac_id;
data.beacon_filter_average_energy =

@@ -792,7 +792,7 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
bytes = (void *)&v11->load_stats.byte_count;
air_time = (void *)&v11->load_stats.air_time;
} else {
struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
struct iwl_notif_statistics *stats = (void *)&pkt->data;

energy = (void *)&stats->load_stats.avg_energy;
bytes = (void *)&stats->load_stats.byte_count;
@@ -7,6 +7,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as

@@ -28,6 +29,7 @@
*
* Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@@ -64,7 +66,7 @@ struct iwl_mvm_active_iface_iterator_data {
struct ieee80211_vif *ignore_vif;
u8 sta_vif_ap_sta_id;
enum iwl_sf_state sta_vif_state;
int num_active_macs;
u32 num_active_macs;
};

/*
@ -356,24 +356,16 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
|
||||
int mac80211_queue, u8 tid, u8 flags)
|
||||
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
int queue, u8 tid, u8 flags)
|
||||
{
|
||||
struct iwl_scd_txq_cfg_cmd cmd = {
|
||||
.scd_queue = queue,
|
||||
.action = SCD_CFG_DISABLE_QUEUE,
|
||||
};
|
||||
bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
|
||||
return -EINVAL;
|
||||
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
if (remove_mac_queue)
|
||||
mvm->hw_queue_to_mac80211[queue] &=
|
||||
~BIT(mac80211_queue);
|
||||
|
||||
iwl_trans_txq_free(mvm->trans, queue);
|
||||
|
||||
return 0;
|
||||
|
@ -384,36 +376,15 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
|
|||
|
||||
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
|
||||
|
||||
/*
|
||||
* If there is another TID with the same AC - don't remove the MAC queue
|
||||
* from the mapping
|
||||
*/
|
||||
if (tid < IWL_MAX_TID_COUNT) {
|
||||
unsigned long tid_bitmap =
|
||||
mvm->queue_info[queue].tid_bitmap;
|
||||
int ac = tid_to_mac80211_ac[tid];
|
||||
int i;
|
||||
|
||||
for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
|
||||
if (tid_to_mac80211_ac[i] == ac)
|
||||
remove_mac_queue = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (remove_mac_queue)
|
||||
mvm->hw_queue_to_mac80211[queue] &=
|
||||
~BIT(mac80211_queue);
|
||||
|
||||
cmd.action = mvm->queue_info[queue].tid_bitmap ?
|
||||
SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
|
||||
if (cmd.action == SCD_CFG_DISABLE_QUEUE)
|
||||
mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"Disabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
|
||||
"Disabling TXQ #%d tids=0x%x\n",
|
||||
queue,
|
||||
mvm->queue_info[queue].tid_bitmap,
|
||||
mvm->hw_queue_to_mac80211[queue]);
|
||||
mvm->queue_info[queue].tid_bitmap);
|
||||
|
||||
/* If the queue is still enabled - nothing left to do in this func */
|
||||
if (cmd.action == SCD_CFG_ENABLE_QUEUE)
|
||||
|
@ -423,15 +394,19 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
|
|||
cmd.tid = mvm->queue_info[queue].txq_tid;
|
||||
|
||||
/* Make sure queue info is correct even though we overwrite it */
|
||||
WARN(mvm->queue_info[queue].tid_bitmap ||
|
||||
mvm->hw_queue_to_mac80211[queue],
|
||||
"TXQ #%d info out-of-sync - mac map=0x%x, tids=0x%x\n",
|
||||
queue, mvm->hw_queue_to_mac80211[queue],
|
||||
mvm->queue_info[queue].tid_bitmap);
|
||||
WARN(mvm->queue_info[queue].tid_bitmap,
|
||||
"TXQ #%d info out-of-sync - tids=0x%x\n",
|
||||
queue, mvm->queue_info[queue].tid_bitmap);
|
||||
|
||||
/* If we are here - the queue is freed and we can zero out these vals */
|
||||
mvm->queue_info[queue].tid_bitmap = 0;
|
||||
mvm->hw_queue_to_mac80211[queue] = 0;
|
||||
|
||||
if (sta) {
|
||||
struct iwl_mvm_txq *mvmtxq =
|
||||
iwl_mvm_txq_from_tid(sta, tid);
|
||||
|
||||
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
}
|
||||
|
||||
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
|
||||
mvm->queue_info[queue].reserved = false;
|
||||
|
@ -517,9 +492,14 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
|
|||
spin_lock_bh(&mvmsta->lock);
|
||||
/* Unmap MAC queues and TIDs from this queue */
|
||||
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
|
||||
struct iwl_mvm_txq *mvmtxq =
|
||||
iwl_mvm_txq_from_tid(sta, tid);
|
||||
|
||||
if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
|
||||
disable_agg_tids |= BIT(tid);
|
||||
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
|
||||
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
}
|
||||
|
||||
mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
|
||||
|
@ -541,10 +521,11 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
|
|||
}
|
||||
|
||||
static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
|
||||
struct ieee80211_sta *old_sta,
|
||||
u8 new_sta_id)
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
u8 txq_curr_ac, sta_id, tid;
|
||||
u8 sta_id, tid;
|
||||
unsigned long disable_agg_tids = 0;
|
||||
bool same_sta;
|
||||
int ret;
|
||||
|
@ -554,7 +535,6 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
|
|||
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
|
||||
return -EINVAL;
|
||||
|
||||
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
|
||||
sta_id = mvm->queue_info[queue].ra_sta_id;
|
||||
tid = mvm->queue_info[queue].txq_tid;
|
||||
|
||||
|
@ -570,9 +550,7 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
|
|||
iwl_mvm_invalidate_sta_queue(mvm, queue,
|
||||
disable_agg_tids, false);
|
||||
|
||||
ret = iwl_mvm_disable_txq(mvm, queue,
|
||||
mvmsta->vif->hw_queue[txq_curr_ac],
|
||||
tid, 0);
|
||||
ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
|
||||
if (ret) {
|
||||
IWL_ERR(mvm,
|
||||
"Failed to free inactive queue %d (ret=%d)\n",
|
||||
|
@ -662,16 +640,15 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
|
|||
* in such a case, otherwise - if no redirection required - it does nothing,
|
||||
* unless the %force param is true.
|
||||
*/
|
||||
static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
|
||||
int ac, int ssn, unsigned int wdg_timeout,
|
||||
bool force)
|
||||
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
|
||||
int ac, int ssn, unsigned int wdg_timeout,
|
||||
bool force, struct iwl_mvm_txq *txq)
|
||||
{
|
||||
struct iwl_scd_txq_cfg_cmd cmd = {
|
||||
.scd_queue = queue,
|
||||
.action = SCD_CFG_DISABLE_QUEUE,
|
||||
};
|
||||
bool shared_queue;
|
||||
unsigned long mq;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
|
||||
|
@ -695,14 +672,14 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
|
|||
cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
|
||||
cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
|
||||
cmd.tid = mvm->queue_info[queue].txq_tid;
|
||||
mq = mvm->hw_queue_to_mac80211[queue];
|
||||
shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
|
||||
queue, iwl_mvm_ac_to_tx_fifo[ac]);
|
||||
|
||||
/* Stop MAC queues and wait for this queue to empty */
|
||||
iwl_mvm_stop_mac_queues(mvm, mq);
|
||||
/* Stop the queue and wait for it to empty */
|
||||
txq->stopped = true;
|
||||
|
||||
ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
|
||||
if (ret) {
|
||||
IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
|
||||
|
@ -743,8 +720,8 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
|
|||
iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
|
||||
|
||||
out:
|
||||
/* Continue using the MAC queues */
|
||||
iwl_mvm_start_mac_queues(mvm, mq);
|
||||
/* Continue using the queue */
|
||||
txq->stopped = false;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -769,7 +746,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
|
|||
return -ENOSPC;
|
||||
}
|
||||
|
||||
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
|
||||
static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
|
||||
u8 sta_id, u8 tid, unsigned int timeout)
|
||||
{
|
||||
int queue, size = IWL_DEFAULT_QUEUE_SIZE;
|
||||
|
@ -792,10 +769,7 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
|
|||
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
|
||||
queue, sta_id, tid);
|
||||
|
||||
mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"Enabling TXQ #%d (mac80211 map:0x%x)\n",
|
||||
queue, mvm->hw_queue_to_mac80211[queue]);
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
|
||||
|
||||
return queue;
|
||||
}
|
||||
|
@ -805,9 +779,10 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
|
|||
int tid)
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_txq *mvmtxq =
|
||||
iwl_mvm_txq_from_tid(sta, tid);
|
||||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
|
||||
u8 mac_queue = mvmsta->vif->hw_queue[ac];
|
||||
int queue = -1;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
@ -815,11 +790,16 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
|
|||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"Allocating queue for sta %d on tid %d\n",
|
||||
mvmsta->sta_id, tid);
|
||||
queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
|
||||
wdg_timeout);
|
||||
queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
|
||||
if (queue < 0)
|
||||
return queue;
|
||||
|
||||
if (sta) {
|
||||
mvmtxq->txq_id = queue;
|
||||
mvm->tvqm_info[queue].txq_tid = tid;
|
||||
mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
|
||||
|
||||
spin_lock_bh(&mvmsta->lock);
|
||||
|
@ -829,8 +809,9 @@ static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
|
||||
int mac80211_queue, u8 sta_id, u8 tid)
|
||||
static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta,
|
||||
int queue, u8 sta_id, u8 tid)
|
||||
{
|
||||
bool enable_queue = true;
|
||||
|
||||
|
@ -845,14 +826,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
|
|||
if (mvm->queue_info[queue].tid_bitmap)
|
||||
enable_queue = false;
|
||||
|
||||
if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
|
||||
WARN(mac80211_queue >=
|
||||
BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
|
||||
"cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
|
||||
mac80211_queue, queue, sta_id, tid);
|
||||
mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
|
||||
}
|
||||
|
||||
mvm->queue_info[queue].tid_bitmap |= BIT(tid);
|
||||
mvm->queue_info[queue].ra_sta_id = sta_id;
|
||||
|
||||
|
@ -866,16 +839,22 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
|
|||
mvm->queue_info[queue].txq_tid = tid;
|
||||
}
|
||||
|
||||
if (sta) {
|
||||
struct iwl_mvm_txq *mvmtxq =
|
||||
iwl_mvm_txq_from_tid(sta, tid);
|
||||
|
||||
mvmtxq->txq_id = queue;
|
||||
}
|
||||
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"Enabling TXQ #%d tids=0x%x (mac80211 map:0x%x)\n",
|
||||
queue, mvm->queue_info[queue].tid_bitmap,
|
||||
mvm->hw_queue_to_mac80211[queue]);
|
||||
"Enabling TXQ #%d tids=0x%x\n",
|
||||
queue, mvm->queue_info[queue].tid_bitmap);
|
||||
|
||||
return enable_queue;
|
||||
}
|
||||
|
||||
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
|
||||
int mac80211_queue, u16 ssn,
|
||||
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
|
||||
int queue, u16 ssn,
|
||||
const struct iwl_trans_txq_scd_cfg *cfg,
|
||||
unsigned int wdg_timeout)
|
||||
{
|
||||
|
@ -895,8 +874,7 @@ static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue,
|
|||
return false;
|
||||
|
||||
/* Send the enabling command if we need to */
|
||||
if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
|
||||
cfg->sta_id, cfg->tid))
|
||||
if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
|
||||
return false;
|
||||
|
||||
inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
|
||||
|
@ -989,9 +967,10 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
|
|||
|
||||
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
|
||||
|
||||
ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
|
||||
tid_to_mac80211_ac[tid], ssn,
|
||||
wdg_timeout, true);
|
||||
ret = iwl_mvm_redirect_queue(mvm, queue, tid,
|
||||
tid_to_mac80211_ac[tid], ssn,
|
||||
wdg_timeout, true,
|
||||
iwl_mvm_txq_from_tid(sta, tid));
|
||||
if (ret) {
|
||||
IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
|
||||
return;
|
||||
|
@ -1068,11 +1047,9 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
|
|||
* Remove the ones that did.
|
||||
*/
|
||||
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
|
||||
int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
|
||||
u16 tid_bitmap;
|
||||
|
||||
mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
|
||||
mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
|
||||
|
||||
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
|
||||
|
@ -1105,10 +1082,6 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
|
|||
* sure all TIDs have existing corresponding mac queues enabled
|
||||
*/
|
||||
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
|
||||
for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
|
||||
mvm->hw_queue_to_mac80211[queue] |=
|
||||
BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
|
||||
}
|
||||
|
||||
/* If the queue is marked as shared - "unshare" it */
|
||||
if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
|
||||
|
@ -1136,6 +1109,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
|
|||
unsigned long unshare_queues = 0;
|
||||
unsigned long changetid_queues = 0;
|
||||
int i, ret, free_queue = -ENOSPC;
|
||||
struct ieee80211_sta *queue_owner = NULL;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
|
@ -1201,13 +1175,14 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
|
|||
inactive_tid_bitmap,
|
||||
&unshare_queues,
|
||||
&changetid_queues);
|
||||
if (ret >= 0 && free_queue < 0)
|
||||
if (ret >= 0 && free_queue < 0) {
|
||||
queue_owner = sta;
|
||||
free_queue = ret;
|
||||
}
|
||||
/* only unlock sta lock - we still need the queue info lock */
|
||||
spin_unlock_bh(&mvmsta->lock);
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
/* Reconfigure queues requiring reconfiguation */
|
||||
for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
|
||||
|
@ -1216,18 +1191,21 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
|
|||
iwl_mvm_change_queue_tid(mvm, i);
|
||||
|
||||
if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
|
||||
ret = iwl_mvm_free_inactive_queue(mvm, free_queue,
|
||||
ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
|
||||
alloc_for_sta);
|
||||
if (ret)
|
||||
if (ret) {
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
return free_queue;
|
||||
}
|
||||
|
||||
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta, u8 ac, int tid,
|
||||
struct ieee80211_hdr *hdr)
|
||||
struct ieee80211_sta *sta, u8 ac, int tid)
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
|
@ -1238,7 +1216,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|||
};
|
||||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
|
||||
u8 mac_queue = mvmsta->vif->hw_queue[ac];
|
||||
int queue = -1;
|
||||
unsigned long disable_agg_tids = 0;
|
||||
enum iwl_mvm_agg_state queue_state;
|
||||
|
@ -1257,12 +1234,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|||
ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
|
||||
spin_unlock_bh(&mvmsta->lock);
|
||||
|
||||
/*
|
||||
* Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
|
||||
* exists
|
||||
*/
|
||||
if (!ieee80211_is_data_qos(hdr->frame_control) ||
|
||||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
|
||||
if (tid == IWL_MAX_TID_COUNT) {
|
||||
queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
|
||||
IWL_MVM_DQA_MIN_MGMT_QUEUE,
|
||||
IWL_MVM_DQA_MAX_MGMT_QUEUE);
|
||||
|
@ -1341,8 +1313,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|||
}
|
||||
}
|
||||
|
||||
inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
|
||||
ssn, &cfg, wdg_timeout);
|
||||
inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);
|
||||
|
||||
/*
|
||||
* Mark queue as shared in transport if shared
|
||||
|
@ -1384,8 +1355,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|||
}
|
||||
} else {
|
||||
/* Redirect queue, if needed */
|
||||
ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
|
||||
wdg_timeout, false);
|
||||
ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
|
||||
wdg_timeout, false,
|
||||
iwl_mvm_txq_from_tid(sta, tid));
|
||||
if (ret)
|
||||
goto out_err;
|
||||
}
|
||||
|
@ -1393,7 +1365,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
|
|||
return 0;
|
||||
|
||||
out_err:
|
||||
iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
|
||||
iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -1406,87 +1378,34 @@ static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
|
|||
return tid_to_mac80211_ac[tid];
|
||||
}
|
||||
|
||||
static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta, int tid)
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
|
||||
struct sk_buff *skb;
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct sk_buff_head deferred_tx;
|
||||
u8 mac_queue;
|
||||
bool no_queue = false; /* Marks if there is a problem with the queue */
|
||||
u8 ac;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
skb = skb_peek(&tid_data->deferred_tx_frames);
|
||||
if (!skb)
|
||||
return;
|
||||
hdr = (void *)skb->data;
|
||||
|
||||
ac = iwl_mvm_tid_to_ac_queue(tid);
|
||||
mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
|
||||
|
||||
if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
|
||||
iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
|
||||
IWL_ERR(mvm,
|
||||
"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
|
||||
mvmsta->sta_id, tid);
|
||||
|
||||
/*
|
||||
* Mark queue as problematic so later the deferred traffic is
|
||||
* freed, as we can do nothing with it
|
||||
*/
|
||||
no_queue = true;
|
||||
}
|
||||
|
||||
__skb_queue_head_init(&deferred_tx);
|
||||
|
||||
/* Disable bottom-halves when entering TX path */
|
||||
local_bh_disable();
|
||||
spin_lock(&mvmsta->lock);
|
||||
skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
|
||||
mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
|
||||
spin_unlock(&mvmsta->lock);
|
||||
|
||||
while ((skb = __skb_dequeue(&deferred_tx)))
|
||||
if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
|
||||
ieee80211_free_txskb(mvm->hw, skb);
|
||||
local_bh_enable();
|
||||
|
||||
/* Wake queue */
|
||||
iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
|
||||
}
|
||||
|
||||
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
|
||||
{
|
||||
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
|
||||
add_stream_wk);
|
||||
struct ieee80211_sta *sta;
|
||||
struct iwl_mvm_sta *mvmsta;
|
||||
unsigned long deferred_tid_traffic;
|
||||
int sta_id, tid;
|
||||
|
||||
mutex_lock(&mvm->mutex);
|
||||
|
||||
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);
|
||||
|
||||
/* Go over all stations with deferred traffic */
|
||||
for_each_set_bit(sta_id, mvm->sta_deferred_frames,
|
||||
IWL_MVM_STATION_COUNT) {
|
||||
clear_bit(sta_id, mvm->sta_deferred_frames);
|
||||
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
|
||||
lockdep_is_held(&mvm->mutex));
|
||||
if (IS_ERR_OR_NULL(sta))
|
||||
continue;
|
||||
while (!list_empty(&mvm->add_stream_txqs)) {
|
||||
struct iwl_mvm_txq *mvmtxq;
|
||||
struct ieee80211_txq *txq;
|
||||
u8 tid;
|
||||
|
||||
mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
|
||||
mvmtxq = list_first_entry(&mvm->add_stream_txqs,
|
||||
struct iwl_mvm_txq, list);
|
||||
|
||||
for_each_set_bit(tid, &deferred_tid_traffic,
|
||||
IWL_MAX_TID_COUNT + 1)
|
||||
iwl_mvm_tx_deferred_stream(mvm, sta, tid);
|
||||
txq = container_of((void *)mvmtxq, struct ieee80211_txq,
|
||||
drv_priv);
|
||||
tid = txq->tid;
|
||||
if (tid == IEEE80211_NUM_TIDS)
|
||||
tid = IWL_MAX_TID_COUNT;
|
||||
|
||||
iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
|
||||
list_del_init(&mvmtxq->list);
|
||||
local_bh_disable();
|
||||
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
mutex_unlock(&mvm->mutex);
|
||||
|
@ -1542,10 +1461,11 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
|
|||
* Note that re-enabling aggregations isn't done in this function.
|
||||
*/
|
||||
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
|
||||
struct iwl_mvm_sta *mvm_sta)
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
unsigned int wdg_timeout =
|
||||
iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
|
||||
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
|
||||
unsigned int wdg =
|
||||
iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
|
||||
int i;
|
||||
struct iwl_trans_txq_scd_cfg cfg = {
|
||||
.sta_id = mvm_sta->sta_id,
|
||||
|
@ -1561,23 +1481,18 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
|
|||
struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
|
||||
int txq_id = tid_data->txq_id;
|
||||
int ac;
|
||||
u8 mac_queue;
|
||||
|
||||
if (txq_id == IWL_MVM_INVALID_QUEUE)
|
||||
continue;
|
||||
|
||||
skb_queue_head_init(&tid_data->deferred_tx_frames);
|
||||
|
||||
ac = tid_to_mac80211_ac[i];
|
||||
mac_queue = mvm_sta->vif->hw_queue[ac];
|
||||
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
IWL_DEBUG_TX_QUEUES(mvm,
|
||||
"Re-mapping sta %d tid %d\n",
|
||||
mvm_sta->sta_id, i);
|
||||
txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
|
||||
mvm_sta->sta_id,
|
||||
i, wdg_timeout);
|
||||
txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
|
||||
i, wdg);
|
||||
tid_data->txq_id = txq_id;
|
||||
|
||||
/*
|
||||
|
@ -1600,8 +1515,7 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
|
|||
"Re-mapping sta %d tid %d to queue %d\n",
|
||||
mvm_sta->sta_id, i, txq_id);
|
||||
|
||||
iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
|
||||
wdg_timeout);
|
||||
iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
|
||||
}
|
||||
}
|
||||
|
@ -1691,7 +1605,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
|
|||
if (ret)
|
||||
goto err;
|
||||
|
||||
iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
|
||||
iwl_mvm_realloc_queues_after_restart(mvm, sta);
|
||||
sta_update = true;
|
||||
sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
|
||||
goto update_fw;
|
||||
|
@ -1724,9 +1638,17 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
|
|||
* frames until the queue is allocated
|
||||
*/
|
||||
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
|
||||
}
|
||||
mvm_sta->deferred_traffic_tid_map = 0;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
|
||||
struct iwl_mvm_txq *mvmtxq =
|
||||
iwl_mvm_txq_from_mac80211(sta->txq[i]);
|
||||
|
||||
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
INIT_LIST_HEAD(&mvmtxq->list);
|
||||
spin_lock_init(&mvmtxq->tx_path_lock);
|
||||
}
|
||||
|
||||
mvm_sta->agg_tids = 0;
|
||||
|
||||
if (iwl_mvm_has_new_rx_api(mvm) &&
|
||||
|
@ -1861,9 +1783,9 @@ static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
|
|||
|
||||
static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
|
||||
struct ieee80211_vif *vif,
|
||||
struct iwl_mvm_sta *mvm_sta)
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
int ac;
|
||||
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
@ -1872,11 +1794,17 @@ static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
|
|||
if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
|
||||
continue;
|
||||
|
||||
ac = iwl_mvm_tid_to_ac_queue(i);
|
||||
iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
|
||||
vif->hw_queue[ac], i, 0);
|
||||
iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
|
||||
0);
|
||||
mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
|
||||
struct iwl_mvm_txq *mvmtxq =
|
||||
iwl_mvm_txq_from_mac80211(sta->txq[i]);
|
||||
|
||||
mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
|
||||
}
|
||||
}
|
||||
|
||||
int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
|
||||
|
@ -1938,7 +1866,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
|
|||
|
||||
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
|
||||
|
||||
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
|
||||
iwl_mvm_disable_sta_queues(mvm, vif, sta);
|
||||
|
||||
/* If there is a TXQ still marked as reserved - free it */
|
||||
if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
|
||||
|
@ -2044,7 +1972,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
|
|||
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
int tvqm_queue =
|
||||
iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
|
||||
iwl_mvm_tvqm_enable_txq(mvm, sta_id,
|
||||
IWL_MAX_TID_COUNT,
|
||||
wdg_timeout);
|
||||
*queue = tvqm_queue;
|
||||
|
@ -2057,7 +1985,7 @@ static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
|
|||
.frame_limit = IWL_FRAME_LIMIT,
|
||||
};
|
||||
|
||||
iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
|
||||
iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2135,8 +2063,7 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
|
||||
IWL_MAX_TID_COUNT, 0);
|
||||
iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
|
||||
ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
|
||||
if (ret)
|
||||
IWL_WARN(mvm, "Failed sending remove station\n");
|
||||
|
@ -2195,8 +2122,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
bsta->tfd_queue_msk |= BIT(queue);
|
||||
|
||||
iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
|
||||
&cfg, wdg_timeout);
|
||||
iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
|
||||
}
|
||||
|
||||
if (vif->type == NL80211_IFTYPE_ADHOC)
|
||||
|
@ -2215,8 +2141,7 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
* to firmware so enable queue here - after the station was added
|
||||
*/
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
|
||||
bsta->sta_id,
|
||||
queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
|
||||
IWL_MAX_TID_COUNT,
|
||||
wdg_timeout);
|
||||
|
||||
|
@ -2254,7 +2179,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
|
|||
return;
|
||||
}
|
||||
|
||||
iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
|
||||
iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
|
||||
if (iwl_mvm_has_new_tx_api(mvm))
|
||||
return;
|
||||
|
||||
|
@ -2377,10 +2302,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
* Note that this is done here as we want to avoid making DQA
|
||||
* changes in mac80211 layer.
|
||||
*/
|
||||
if (vif->type == NL80211_IFTYPE_ADHOC) {
|
||||
vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
|
||||
mvmvif->cab_queue = vif->cab_queue;
|
||||
}
|
||||
if (vif->type == NL80211_IFTYPE_ADHOC)
|
||||
mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
|
||||
|
||||
/*
|
||||
* While in previous FWs we had to exclude cab queue from TFD queue
|
||||
|
@ -2388,9 +2311,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
*/
|
||||
if (!iwl_mvm_has_new_tx_api(mvm) &&
|
||||
fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
|
||||
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
|
||||
&cfg, timeout);
|
||||
msta->tfd_queue_msk |= BIT(vif->cab_queue);
|
||||
iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
|
||||
timeout);
|
||||
msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
|
||||
}
|
||||
ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
|
||||
mvmvif->id, mvmvif->color);
|
||||
|
@ -2407,15 +2330,14 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
* tfd_queue_mask.
|
||||
*/
|
||||
if (iwl_mvm_has_new_tx_api(mvm)) {
|
||||
int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
|
||||
msta->sta_id,
|
||||
int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
|
||||
0,
|
||||
timeout);
|
||||
mvmvif->cab_queue = queue;
|
||||
} else if (!fw_has_api(&mvm->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_API_STA_TYPE))
|
||||
iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
|
||||
&cfg, timeout);
|
||||
iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
|
||||
timeout);
|
||||
|
||||
if (mvmvif->ap_wep_key) {
|
||||
u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
|
||||
|
@ -2446,8 +2368,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
|
|||
|
||||
iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
|
||||
|
||||
iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
|
||||
0, 0);
|
||||
iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
|
||||
|
||||
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
|
||||
if (ret)
|
||||
|
@ -2781,7 +2702,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
struct iwl_mvm_tid_data *tid_data;
|
||||
u16 normalized_ssn;
|
||||
int txq_id;
|
||||
u16 txq_id;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
|
||||
|
@ -2823,17 +2744,24 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
*/
|
||||
txq_id = mvmsta->tid_data[tid].txq_id;
|
||||
if (txq_id == IWL_MVM_INVALID_QUEUE) {
|
||||
txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
|
||||
IWL_MVM_DQA_MIN_DATA_QUEUE,
|
||||
IWL_MVM_DQA_MAX_DATA_QUEUE);
|
||||
if (txq_id < 0) {
|
||||
ret = txq_id;
|
||||
ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
|
||||
IWL_MVM_DQA_MIN_DATA_QUEUE,
|
||||
IWL_MVM_DQA_MAX_DATA_QUEUE);
|
||||
if (ret < 0) {
|
||||
IWL_ERR(mvm, "Failed to allocate agg queue\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
txq_id = ret;
|
||||
|
||||
/* TXQ hasn't yet been enabled, so mark it only as reserved */
|
||||
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
|
||||
} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
|
||||
ret = -ENXIO;
|
||||
IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
|
||||
tid, IWL_MAX_HW_QUEUES - 1);
|
||||
goto out;
|
||||
|
||||
} else if (unlikely(mvm->queue_info[txq_id].status ==
|
||||
IWL_MVM_QUEUE_SHARED)) {
|
||||
ret = -ENXIO;
|
||||
|
@ -2976,8 +2904,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
|||
}
|
||||
|
||||
if (alloc_queue)
|
||||
iwl_mvm_enable_txq(mvm, queue,
|
||||
vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
|
||||
iwl_mvm_enable_txq(mvm, sta, queue, ssn,
|
||||
&cfg, wdg_timeout);
|
||||
|
||||
/* Send ADD_STA command to enable aggs only if the queue isn't shared */
|
||||
|
|
|
@@ -297,7 +297,6 @@ enum iwl_mvm_agg_state {

/**
* struct iwl_mvm_tid_data - holds the states for each RA / TID
* @deferred_tx_frames: deferred TX frames for this RA/TID
* @seq_number: the next WiFi sequence number to use
* @next_reclaimed: the WiFi sequence number of the next packet to be acked.
* This is basically (last acked packet++).

@@ -318,7 +317,6 @@ enum iwl_mvm_agg_state {
* tpt_meas_start
*/
struct iwl_mvm_tid_data {
struct sk_buff_head deferred_tx_frames;
u16 seq_number;
u16 next_reclaimed;
/* The rest is Tx AGG related */

@@ -427,8 +425,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;

u16 deferred_traffic_tid_map;

u8 reserved_queue;

/* Temporary, until the new TLC will control the Tx protection */
@@ -334,6 +334,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
switch (te_data->vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
ieee80211_remain_on_channel_expired(mvm->hw);
set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
iwl_mvm_roc_finished(mvm);
break;
case NL80211_IFTYPE_STATION:
@ -533,10 +533,11 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
|
||||
/*
|
||||
* For data packets rate info comes from the fw. Only
|
||||
* set rate/antenna during connection establishment.
|
||||
* set rate/antenna during connection establishment or in case
|
||||
* no station is given.
|
||||
*/
|
||||
if (sta && (!ieee80211_is_data(hdr->frame_control) ||
|
||||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
|
||||
if (!sta || !ieee80211_is_data(hdr->frame_control) ||
|
||||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
|
||||
flags |= IWL_TX_FLAGS_CMD_RATE;
|
||||
rate_n_flags =
|
||||
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
|
||||
|
@ -602,11 +603,12 @@ static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
|
|||
}
|
||||
|
||||
static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
|
||||
struct ieee80211_tx_info *info, __le16 fc)
|
||||
struct ieee80211_tx_info *info,
|
||||
struct ieee80211_hdr *hdr)
|
||||
{
|
||||
struct iwl_mvm_vif *mvmvif;
|
||||
|
||||
mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
|
||||
struct iwl_mvm_vif *mvmvif =
|
||||
iwl_mvm_vif_from_mac80211(info->control.vif);
|
||||
__le16 fc = hdr->frame_control;
|
||||
|
||||
switch (info->control.vif->type) {
|
||||
case NL80211_IFTYPE_AP:
|
||||
|
@ -625,7 +627,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
|
|||
(!ieee80211_is_bufferable_mmpdu(fc) ||
|
||||
ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
|
||||
return mvm->probe_queue;
|
||||
if (info->hw_queue == info->control.vif->cab_queue)
|
||||
|
||||
if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
|
||||
is_multicast_ether_addr(hdr->addr1))
|
||||
return mvmvif->cab_queue;
|
||||
|
||||
WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
|
||||
|
@ -634,8 +638,6 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
|
|||
case NL80211_IFTYPE_P2P_DEVICE:
|
||||
if (ieee80211_is_mgmt(fc))
|
||||
return mvm->p2p_dev_queue;
|
||||
if (info->hw_queue == info->control.vif->cab_queue)
|
||||
return mvmvif->cab_queue;
|
||||
|
||||
WARN_ON_ONCE(1);
|
||||
return mvm->p2p_dev_queue;
|
||||
|
@ -713,6 +715,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
|||
u8 sta_id;
|
||||
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
|
||||
__le16 fc = hdr->frame_control;
|
||||
bool offchannel = IEEE80211_SKB_CB(skb)->flags &
|
||||
IEEE80211_TX_CTL_TX_OFFCHAN;
|
||||
int queue = -1;
|
||||
|
||||
memcpy(&info, skb->cb, sizeof(info));
|
||||
|
@ -720,11 +724,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
|||
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
|
||||
return -1;
|
||||
|
||||
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
|
||||
(!info.control.vif ||
|
||||
info.hw_queue != info.control.vif->cab_queue)))
|
||||
return -1;
|
||||
|
||||
if (info.control.vif) {
|
||||
struct iwl_mvm_vif *mvmvif =
|
||||
iwl_mvm_vif_from_mac80211(info.control.vif);
|
||||
|
@ -737,14 +736,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
|||
else
|
||||
sta_id = mvmvif->mcast_sta.sta_id;
|
||||
|
||||
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
|
||||
hdr->frame_control);
|
||||
|
||||
queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr);
|
||||
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
|
||||
queue = mvm->snif_queue;
|
||||
sta_id = mvm->snif_sta.sta_id;
|
||||
} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
|
||||
info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) {
|
||||
offchannel) {
|
||||
/*
|
||||
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
|
||||
* that can be used in 2 different types of vifs, P2P &
|
||||
|
@ -758,8 +755,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
|
|||
}
|
||||
}
|
||||
|
||||
if (queue < 0)
|
||||
if (queue < 0) {
|
||||
IWL_ERR(mvm, "No queue was found. Dropping TX\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (unlikely(ieee80211_is_probe_resp(fc)))
|
||||
iwl_mvm_probe_resp_set_noa(mvm, skb);
|
||||
|
@@ -781,6 +780,35 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
return 0;
}

unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, unsigned int tid)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
u8 ac = tid_to_mac80211_ac[tid];
unsigned int txf;
int lmac = IWL_LMAC_24G_INDEX;

if (iwl_mvm_is_cdb_supported(mvm) &&
band == NL80211_BAND_5GHZ)
lmac = IWL_LMAC_5G_INDEX;

/* For HE redirect to trigger based fifos */
if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
ac += 4;

txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);

/*
* Don't send an AMSDU that will be longer than the TXF.
* Add a security margin of 256 for the TX command + headers.
* We also want to have the start of the next packet inside the
* fifo to be able to send bursts.
*/
return min_t(unsigned int, mvmsta->max_amsdu_len,
mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
}

#ifdef CONFIG_INET

static int
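In effect the helper above caps the A-MSDU at min(station limit, TX FIFO size - 256). With illustrative numbers only: a hypothetical 10240-byte FIFO and a 7935-byte station limit leaves the cap at 7935, while a 4096-byte FIFO would pull it down to 3840. A one-line sketch of that computation:

static unsigned int amsdu_cap(unsigned int sta_limit, unsigned int fifo_size)
{
	/* 256-byte margin for the TX command + headers, as in the code above */
	return sta_limit < fifo_size - 256 ? sta_limit : fifo_size - 256;
}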
@ -850,36 +878,6 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
|
||||
struct ieee80211_sta *sta,
|
||||
unsigned int tid)
|
||||
{
|
||||
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
|
||||
enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
|
||||
u8 ac = tid_to_mac80211_ac[tid];
|
||||
unsigned int txf;
|
||||
int lmac = IWL_LMAC_24G_INDEX;
|
||||
|
||||
if (iwl_mvm_is_cdb_supported(mvm) &&
|
||||
band == NL80211_BAND_5GHZ)
|
||||
lmac = IWL_LMAC_5G_INDEX;
|
||||
|
||||
/* For HE redirect to trigger based fifos */
|
||||
if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
|
||||
ac += 4;
|
||||
|
||||
txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
|
||||
|
||||
/*
|
||||
* Don't send an AMSDU that will be longer than the TXF.
|
||||
* Add a security margin of 256 for the TX command + headers.
|
||||
* We also want to have the start of the next packet inside the
|
||||
* fifo to be able to send bursts.
|
||||
*/
|
||||
return min_t(unsigned int, mvmsta->max_amsdu_len,
|
||||
mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
|
||||
}
|
||||
|
||||
static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
|
||||
struct ieee80211_tx_info *info,
|
||||
struct ieee80211_sta *sta,
|
||||
|
@@ -1002,34 +1000,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
#endif

static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta, u8 tid,
struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u8 mac_queue = info->hw_queue;
struct sk_buff_head *deferred_tx_frames;

lockdep_assert_held(&mvm_sta->lock);

mvm_sta->deferred_traffic_tid_map |= BIT(tid);
set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);

deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;

skb_queue_tail(deferred_tx_frames, skb);

/*
 * The first deferred frame should've stopped the MAC queues, so we
 * should never get a second deferred frame for the RA/TID.
 * In case of GSO the first packet may have been split, so don't warn.
 */
if (skb_queue_len(deferred_tx_frames) == 1) {
iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
schedule_work(&mvm->add_stream_wk);
}
}

/* Check if there are any timed-out TIDs on a given shared TXQ */
static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
{
@@ -1054,7 +1024,12 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
int airtime)
{
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
struct iwl_mvm_tcm_mac *mdata;

if (mac >= NUM_MAC_INDEX_DRIVER)
return;

mdata = &mvm->tcm.data[mac];

if (mvm->tcm.paused)
return;
@@ -1065,14 +1040,21 @@ static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
mdata->tx.airtime += airtime;
}

static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta, int tid)
static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta, int tid)
{
u32 ac = tid_to_mac80211_ac[tid];
int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
struct iwl_mvm_tcm_mac *mdata;

if (mac >= NUM_MAC_INDEX_DRIVER)
return -EINVAL;

mdata = &mvm->tcm.data[mac];

mdata->tx.pkts[ac]++;

return 0;
}

/*
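Both hunks above apply the same hardening called out among the series' static-analyzer fixes: the MAC index derived from mac_id_n_color is checked against NUM_MAC_INDEX_DRIVER before mvm->tcm.data[] is indexed, and iwl_mvm_tx_pkt_queued now returns an error instead of accounting against a bogus slot. A minimal sketch of the pattern, with a made-up bound and struct:

#define EXAMPLE_NUM_MACS 4		/* hypothetical bound */

struct example_tcm_mac { unsigned long pkts; };

/* Validate a firmware-derived index before touching the array. */
static int example_account_pkt(struct example_tcm_mac *data, int mac)
{
	struct example_tcm_mac *mdata;

	if (mac < 0 || mac >= EXAMPLE_NUM_MACS)
		return -EINVAL;

	mdata = &data[mac];
	mdata->pkts++;
	return 0;
}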
@@ -1088,7 +1070,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc;
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
u16 txq_id = info->hw_queue;
u16 txq_id;
bool is_ampdu = false;
int hdrlen;

@@ -1152,14 +1134,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,

WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);

/* Check if TXQ needs to be allocated or re-activated */
if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE)) {
iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);

/*
 * The frame is now deferred, and the worker scheduled
 * will re-allocate it, so we can free it for now.
 */
if (WARN_ON_ONCE(txq_id == IWL_MVM_INVALID_QUEUE)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
spin_unlock(&mvmsta->lock);
return 0;
@@ -1199,7 +1174,9 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,

spin_unlock(&mvmsta->lock);

iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
tid == IWL_MAX_TID_COUNT ? 0 : tid))
goto drop;

return 0;

@@ -463,6 +463,9 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
sizeof(table));

if (table.valid)
mvm->fwrt.dump.umac_err_id = table.error_id;

if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",

@@ -486,11 +489,11 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}

static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
u32 val;
u32 val, base = mvm->error_event_table[lmac_num];

if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
if (!base)

@@ -541,7 +544,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));

if (table.valid)
mvm->fwrt.dump.rt_status = table.error_id;
mvm->fwrt.dump.lmac_err_id[lmac_num] = table.error_id;

if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");

@@ -598,10 +601,10 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
return;
}

iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
iwl_mvm_dump_lmac_error_log(mvm, 0);

if (mvm->error_event_table[1])
iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
iwl_mvm_dump_lmac_error_log(mvm, 1);

iwl_mvm_dump_umac_error_log(mvm);
}
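The hunks above rework the LMAC error dump to take the LMAC number rather than a raw table address: the helper resolves the base from mvm->error_event_table[lmac_num] itself and records the error ID per LMAC in fwrt.dump.lmac_err_id[] instead of the single rt_status slot. A sketch of the per-index bookkeeping (types and names below are hypothetical stand-ins, not driver fields):

#include <linux/types.h>

#define EXAMPLE_MAX_LMAC 2		/* hypothetical LMAC count */

struct example_dump_state {
	u32 lmac_err_id[EXAMPLE_MAX_LMAC];
};

/* Record one error id per LMAC instead of overwriting a single slot. */
static void example_record_lmac_error(struct example_dump_state *dump,
				      const u32 *err_id_by_lmac, int lmac)
{
	if (lmac < 0 || lmac >= EXAMPLE_MAX_LMAC)
		return;

	dump->lmac_err_id[lmac] = err_id_by_lmac[lmac];
}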
@@ -566,6 +566,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x001C, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},

@@ -596,12 +597,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
@@ -871,32 +874,79 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},

/* 22000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x02F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x0310, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x02F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x02F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x02F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x0310, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x06F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x06F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x06F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x2720, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x2720, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x34F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x34F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x43F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0x43F0, 0x4070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0074, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x007C, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22560_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x1651, killer1650s_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl22560_2ax_cfg_hr)},

{IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},

{IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
{IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},

#endif /* CONFIG_IWLMVM */
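The PCI ID hunks above add the new 0x2723 (22260) and Killer 1650 entries, move the 0x02F0/0x06F0/0x2720/0x34F0/0x43F0/0xA0F0 platforms onto the renamed iwl22560_2ax_cfg_hr config, and add a handful of 9260/9560 subsystem IDs. Each entry simply binds a (device, subsystem-device) pair to a configuration struct carried in driver_data. A hedged sketch of how such a table is declared (the vendor, device, and cfg names below are placeholders, not new iwlwifi IDs):

#include <linux/pci.h>

struct example_cfg { const char *name; };

static const struct example_cfg example_cfg_a = { .name = "variant A" };
static const struct example_cfg example_cfg_b = { .name = "variant B" };

/* Match on device + subsystem device; any subsystem vendor. */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE_SUB(0x8086, 0x1234, PCI_ANY_ID, 0x0010),
	  .driver_data = (kernel_ulong_t)&example_cfg_a },
	{ PCI_DEVICE_SUB(0x8086, 0x1234, PCI_ANY_ID, 0x0014),
	  .driver_data = (kernel_ulong_t)&example_cfg_b },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);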
@@ -1029,8 +1029,6 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
@@ -868,30 +868,6 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
{
if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
return;

if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
return;

if (!trans->cfg->integrated)
return;

/*
 * Turn on the chicken-bits that cause MAC wakeup for RX-related
 * values.
 * This costs some power, but needed for W/A 9000 integrated A-step
 * bug where shadow registers are not in the retention list and their
 * value is lost when NIC powers down
 */
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
}

static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

@@ -979,8 +955,6 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)

/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

iwl_pcie_enable_rx_wake(trans, true);
}

void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -1530,8 +1530,6 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
iwl_clear_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_init_done));

iwl_pcie_enable_rx_wake(trans, false);

if (reset) {
/*
 * reset TX queues -- some of their registers reset during S3

@@ -1558,8 +1556,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
return 0;
}

iwl_pcie_enable_rx_wake(trans, true);

iwl_set_bit(trans, CSR_GP_CNTRL,
BIT(trans->cfg->csr->flag_mac_access_req));
iwl_set_bit(trans, CSR_GP_CNTRL,

@@ -1968,7 +1964,7 @@ static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
struct iwl_trans_pcie_removal *removal =
container_of(wk, struct iwl_trans_pcie_removal, work);
struct pci_dev *pdev = removal->pdev;
char *prop[] = {"EVENT=INACCESSIBLE", NULL};
static char *prop[] = {"EVENT=INACCESSIBLE", NULL};

dev_err(&pdev->dev, "Device gone - attempting removal\n");
kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
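The last hunk above makes the uevent environment array static, so the string pointers handed to kobject_uevent_env() live in the module image rather than on the worker's stack frame. A minimal sketch of emitting such a notification (assuming the caller supplies a valid struct device):

#include <linux/device.h>
#include <linux/kobject.h>

/* Tell user space that a device became inaccessible. */
static void example_notify_inaccessible(struct device *dev)
{
	/* static: the env strings outlive this stack frame */
	static char *envp[] = { "EVENT=INACCESSIBLE", NULL };

	dev_err(dev, "device gone - notifying user space\n");
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}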
@@ -3569,24 +3565,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}

/*
 * 9000-series integrated A-step has a problem with suspend/resume
 * and sometimes even causes the whole platform to get stuck. This
 * workaround makes the hardware not go into the problematic state.
 */
if (trans->cfg->integrated &&
trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)
iwl_set_bit(trans, CSR_HOST_CHICKEN,
CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME);
IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);

#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);

if (cfg == &iwl22000_2ax_cfg_hr) {
if (cfg == &iwl22560_2ax_cfg_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
trans->cfg = &iwl22000_2ax_cfg_hr;
trans->cfg = &iwl22560_2ax_cfg_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
trans->cfg = &iwl22000_2ax_cfg_jf;

@@ -3602,7 +3589,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
goto out_no_pci;
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
(trans->cfg != &iwl22260_2ax_cfg ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;

hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
@@ -214,7 +214,11 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
struct iwl_tfh_tb *tb = &tfd->tbs[idx];
struct iwl_tfh_tb *tb;

if (WARN_ON(idx >= IWL_NUM_OF_TBS))
return -EINVAL;
tb = &tfd->tbs[idx];

/* Each TFD can point to a maximum max_tbs Tx buffers */
if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
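The gen2 TX hunk above mirrors the earlier MVM fixes: the pointer into the fixed-size tbs[] array is only formed after the index has been range-checked, with WARN_ON making an out-of-range index visible in the logs and -EINVAL stopping the write. A compact sketch of the idiom (array length and element type are placeholders):

#include <linux/bug.h>
#include <linux/errno.h>

#define EXAMPLE_NUM_OF_TBS 25		/* hypothetical array length */

struct example_tb { unsigned long addr; unsigned short len; };

static int example_set_tb(struct example_tb *tbs, int idx,
			  unsigned long addr, unsigned short len)
{
	struct example_tb *tb;

	if (WARN_ON(idx < 0 || idx >= EXAMPLE_NUM_OF_TBS))
		return -EINVAL;	/* refuse before forming the pointer */

	tb = &tbs[idx];
	tb->addr = addr;
	tb->len = len;
	return idx;
}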
@@ -408,7 +412,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
goto out_err;

/* building the A-MSDU might have changed this data, memcpy it now */
memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
return tfd;

out_err:

@@ -469,7 +473,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);

/* The first TB points to bi-directional DMA data */
memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
@@ -2438,8 +2438,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}

/* building the A-MSDU might have changed this data, so memcpy it now */
memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
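The three memcpy hunks above (two in the gen2 path, one here) copy IWL_FIRST_TB_SIZE bytes starting at dev_cmd itself rather than at &dev_cmd->hdr, so the first-TB bounce buffer mirrors the command from its very first byte and offsets into the buffer line up with offsets into the command. A small sketch of the idea with placeholder types and sizes:

#include <linux/string.h>

/* Illustrative only: mirror the first EXAMPLE_FIRST_TB_SIZE bytes of a
 * command into a per-slot bounce buffer, starting from the struct's
 * first byte.  The sizes and types are not driver definitions.
 */
#define EXAMPLE_FIRST_TB_SIZE 20

struct example_cmd {
	unsigned char hdr[4];
	unsigned char payload[60];
};

static void example_fill_first_tb(unsigned char *first_tb_buf,
				  const struct example_cmd *cmd)
{
	/* copy from 'cmd', not from an interior member, so buffer
	 * offsets match command offsets */
	memcpy(first_tb_buf, cmd, EXAMPLE_FIRST_TB_SIZE);
}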