mirror of https://gitee.com/openkylin/linux.git
We have a lot of fixes, most of them are also applicable to stable.

Notably:
 * fix use-after-free issues
 * fix DMA mapping API usage errors
 * fix frame drop occurring due to reorder buffer handling in RSS in
   certain conditions
 * fix rate scale locking issues
 * disable TX A-MSDU on older NICs as it causes problems and was never
   supposed to be supported
 * new PCI IDs
 * GEO_TX_POWER_LIMIT API issue that many people were hitting

-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEH1e1rEeCd0AIMq6MB8qZga/fl8QFAl1AceMACgkQB8qZga/f
l8S4kw//Xp3BgiFXiEwJ3aX+iz3BE5dOxmo69cvVGYz6No8IXjkciL0v3cP4POvJ
6TuQLxGnbJ59GdrHaHS/vy2RhZCnhsGpiaWtaYBsv4U1+fvJl6GHtG6wLTHzPwmV
WRxAOoYTcKs/j4RZBeAJz4LJAHcyHUyD/KCTwPGFaqZp1V0gtcPh7N4NsMUquwyi
RduWI8ceXvjj9YsLwSyZfxr9eRKwj9EVagTmSlL4Yf7h2OOCD8nEMWCfpo80cKGT
X6BFhmN30bP0WC3WKTEFfO/raW2d6r0JSb/qgydI37qpQUFQ3mejs7r9n3/v9ECn
3pjyEwFLbws3I1FCn3Nn3c2DnD2SH5YYh0z87dMO4Yg0jM+MemfTEJ39hhwGeJ9Q
9nN7o3arG8uS+mqK7/i48WAygqTY33V6xBZ3kf17l2wlSJwn134KbSQ1GZogyoCg
3I/ZXNJL6x+GXWEdpb5DbDwRRIbn6B7AMJ8lPRVu0fJHHB+3ZPHNqCHHynPDKHZA
Kni1TEqMgifMnjKvnzKGylvQD+IFZ5a805pbXIsPT63aI/WnEY2crA0Xb2KV96BP
cNsWGg/oDDcs4n02DQ29tBVpLNgv2pY5wpbB8C8DX4DPBlvk4D1OCuzTVwXSr48K
g6T/ZIx6nnQdoIB4uaJtFfQpUsIlt5FQ4ve3WUImm0Flv9F3pH0=
=7W8I
-----END PGP SIGNATURE-----

Merge tag 'iwlwifi-fixes-for-kvalo-2019-07-30' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-fixes
This commit is contained in:

commit 66f5772ee2
@@ -776,7 +776,6 @@ struct iwl_rss_config_cmd {
 	u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
 } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
 
-#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
 #define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
 #define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
 
@@ -812,10 +811,12 @@ struct iwl_rxq_sync_notification {
  *
  * @IWL_MVM_RXQ_EMPTY: empty sync notification
  * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN
  */
 enum iwl_mvm_rxq_notif_type {
 	IWL_MVM_RXQ_EMPTY,
 	IWL_MVM_RXQ_NOTIF_DEL_BA,
+	IWL_MVM_RXQ_NSSN_SYNC,
 };
 
 /**
@@ -1640,6 +1640,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
 	init_completion(&drv->request_firmware_complete);
 	INIT_LIST_HEAD(&drv->list);
 
+	iwl_load_fw_dbg_tlv(drv->trans->dev, drv->trans);
+
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	/* Create the device debugfs entries. */
 	drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
@@ -1660,8 +1662,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
 err_fw:
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	debugfs_remove_recursive(drv->dbgfs_drv);
-	iwl_fw_dbg_free(drv->trans);
 #endif
+	iwl_fw_dbg_free(drv->trans);
 	kfree(drv);
 err:
 	return ERR_PTR(ret);
@@ -755,7 +755,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 
 	for (i = 0; i < n_profiles; i++) {
 		/* the tables start at element 3 */
-		static int pos = 3;
+		int pos = 3;
 
 		/* The EWRD profiles officially go from 2 to 4, but we
 		 * save them in sar_profiles[1-3] (because we don't
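The one-character fix above addresses a classic C pitfall: a function-scope `static` variable is initialized once and keeps its value across calls, so `pos` kept advancing on every subsequent read of the EWRD table instead of restarting at element 3. A minimal standalone sketch (not driver code) of the difference:

    #include <stdio.h>

    /* With 'static', pos survives across calls and keeps advancing. */
    static int next_static(void)
    {
        static int pos = 3;   /* initialized once, for the process lifetime */
        return pos++;
    }

    /* Without 'static', pos restarts at 3 on every call, as intended. */
    static int next_auto(void)
    {
        int pos = 3;          /* re-initialized on each call */
        return pos++;
    }

    int main(void)
    {
        int a = next_static(), b = next_static();
        int c = next_auto(), d = next_auto();

        printf("static: %d %d\n", a, b);  /* static: 3 4 */
        printf("auto:   %d %d\n", c, d);  /* auto:   3 3 */
        return 0;
    }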
@@ -880,6 +880,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 }
 
+static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+{
+	/*
+	 * The GEO_TX_POWER_LIMIT command is not supported on earlier
+	 * firmware versions.  Unfortunately, we don't have a TLV API
+	 * flag to rely on, so rely on the major version which is in
+	 * the first byte of ucode_ver.  This was implemented
+	 * initially on version 38 and then backported to 36, 29 and
+	 * 17.
+	 */
+	return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+	       IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+}
+
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 {
 	struct iwl_geo_tx_power_profiles_resp *resp;
@@ -909,6 +925,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 		.data = { data },
 	};
 
+	if (!iwl_mvm_sar_geo_support(mvm))
+		return -EOPNOTSUPP;
+
 	ret = iwl_mvm_send_cmd(mvm, &cmd);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
@@ -934,13 +953,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 	int ret, i, j;
 	u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 
-	/*
-	 * This command is not supported on earlier firmware versions.
-	 * Unfortunately, we don't have a TLV API flag to rely on, so
-	 * rely on the major version which is in the first byte of
-	 * ucode_ver.
-	 */
-	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+	if (!iwl_mvm_sar_geo_support(mvm))
 		return 0;
 
 	ret = iwl_mvm_sar_get_wgds_table(mvm);
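With the check factored into iwl_mvm_sar_geo_support(), both iwl_mvm_get_sar_geo_profile() and iwl_mvm_sar_geo_init() gate on the same firmware list instead of two diverging version tests — the old `< 41` test rejected the backport versions, which is what users were hitting. A simplified sketch of the predicate, with a hypothetical fw_major() standing in for the driver's IWL_UCODE_SERIAL():

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for IWL_UCODE_SERIAL(): major version byte. */
    static inline uint8_t fw_major(uint32_t ucode_ver)
    {
        return (uint8_t)(ucode_ver >> 24);
    }

    /*
     * Centralized capability predicate: the feature shipped in major 38
     * and was backported to exactly 36, 29 and 17, so a plain ">=" or
     * "< 41" test misclassifies the backports.
     */
    static bool geo_tx_power_supported(uint32_t ucode_ver)
    {
        uint8_t major = fw_major(ucode_ver);

        return major >= 38 || major == 36 || major == 29 || major == 17;
    }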
@@ -207,11 +207,11 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
 	},
 };
 
-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
-			       enum set_key_cmd cmd,
-			       struct ieee80211_vif *vif,
-			       struct ieee80211_sta *sta,
-			       struct ieee80211_key_conf *key);
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+				 enum set_key_cmd cmd,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta,
+				 struct ieee80211_key_conf *key);
 
 void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
 {
@@ -474,7 +474,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
 	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
 	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
-	ieee80211_hw_set(hw, TX_AMSDU);
+	/*
+	 * On older devices, enabling TX A-MSDU occasionally leads to
+	 * something getting messed up, the command read from the FIFO
+	 * gets out of sync and isn't a TX command, so that we have an
+	 * assert EDC.
+	 *
+	 * It's not clear where the bug is, but since we didn't used to
+	 * support A-MSDU until moving the mac80211 iTXQs, just leave it
+	 * for older devices. We also don't see this issue on any newer
+	 * devices.
+	 */
+	if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000)
+		ieee80211_hw_set(hw, TX_AMSDU);
 	ieee80211_hw_set(hw, TX_FRAG_LIST);
 
 	if (iwl_mvm_has_tlc_offload(mvm)) {
@@ -2726,7 +2738,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
 
 		mvmvif->ap_early_keys[i] = NULL;
 
-		ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
+		ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
 		if (ret)
 			goto out_quota_failed;
 	}
@@ -3494,11 +3506,11 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
 	return ret;
 }
 
-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
-			       enum set_key_cmd cmd,
-			       struct ieee80211_vif *vif,
-			       struct ieee80211_sta *sta,
-			       struct ieee80211_key_conf *key)
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+				 enum set_key_cmd cmd,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta,
+				 struct ieee80211_key_conf *key)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -3553,8 +3565,6 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 		return -EOPNOTSUPP;
 	}
 
-	mutex_lock(&mvm->mutex);
-
 	switch (cmd) {
 	case SET_KEY:
 		if ((vif->type == NL80211_IFTYPE_ADHOC ||
@@ -3700,7 +3710,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
 		ret = -EINVAL;
 	}
 
+	return ret;
+}
+
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+			       enum set_key_cmd cmd,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta,
+			       struct ieee80211_key_conf *key)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
 	mutex_unlock(&mvm->mutex);
 
 	return ret;
 }
 
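This hunk completes the split: __iwl_mvm_mac_set_key() does the work and assumes the caller already holds mvm->mutex, while iwl_mvm_mac_set_key() remains the mac80211 entry point that takes the lock. Callers already under the mutex, such as the early-keys loop in iwl_mvm_start_ap_ibss() above, use the double-underscore variant and avoid self-deadlock. A generic userspace sketch of the convention (pthreads standing in for the kernel mutex):

    #include <pthread.h>

    struct dev {
        pthread_mutex_t mutex;
        int key_count;
    };

    /* Does the real work; caller must already hold dev->mutex. */
    static int __dev_set_key(struct dev *d, int key)
    {
        (void)key;        /* a real driver would program this key */
        d->key_count++;   /* state protected by the mutex */
        return 0;
    }

    /* Public entry point: takes the lock, delegates, releases. */
    static int dev_set_key(struct dev *d, int key)
    {
        int ret;

        pthread_mutex_lock(&d->mutex);
        ret = __dev_set_key(d, key);
        pthread_mutex_unlock(&d->mutex);
        return ret;
    }

    int main(void)
    {
        struct dev d = { .mutex = PTHREAD_MUTEX_INITIALIZER, .key_count = 0 };

        return dev_set_key(&d, 42);
    }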
@@ -5041,7 +5066,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
 	int ret;
 
-	lockdep_assert_held(&mvm->mutex);
 
 	if (!iwl_mvm_has_new_rx_api(mvm))
 		return;
@@ -5052,13 +5076,15 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 	atomic_set(&mvm->queue_sync_counter,
 		   mvm->trans->num_rx_queues);
 
-	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
+	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
+				      size, !notif->sync);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
 		goto out;
 	}
 
 	if (notif->sync) {
+		lockdep_assert_held(&mvm->mutex);
 		ret = wait_event_timeout(mvm->rx_sync_waitq,
 					 atomic_read(&mvm->queue_sync_counter) == 0 ||
 					 iwl_mvm_is_radio_killed(mvm),
@@ -1664,9 +1664,9 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
 void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
 			      struct iwl_rx_cmd_buffer *rxb, int queue);
 int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
-			    const u8 *data, u32 count);
-void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			    int queue);
+			    const u8 *data, u32 count, bool async);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
+			    struct iwl_rx_cmd_buffer *rxb, int queue);
 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
 				   struct iwl_rx_cmd_buffer *rxb);
@@ -1813,7 +1813,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 /* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
 int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
 void rs_update_last_rssi(struct iwl_mvm *mvm,
@@ -620,7 +620,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
 	enum iwl_mcc_source src;
 	char mcc[3];
 	struct ieee80211_regdomain *regd;
-	u32 wgds_tbl_idx;
+	int wgds_tbl_idx;
 
 	lockdep_assert_held(&mvm->mutex);
 
@@ -1088,7 +1088,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
 					 RX_QUEUES_NOTIFICATION)))
-		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
+		iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
 	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
 		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
 	else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
@@ -1812,7 +1812,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
 		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
 	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
 					 RX_QUEUES_NOTIFICATION)))
-		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
+		iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
 	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
 		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
 }
@@ -1197,239 +1197,6 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
 	return tid;
 }
 
-void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			  int tid, struct ieee80211_tx_info *info, bool ndp)
-{
-	int legacy_success;
-	int retries;
-	int i;
-	struct iwl_lq_cmd *table;
-	u32 lq_hwrate;
-	struct rs_rate lq_rate, tx_resp_rate;
-	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
-	u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
-	u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
-	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
-	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
-	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
-
-	/* Treat uninitialized rate scaling data same as non-existing. */
-	if (!lq_sta) {
-		IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
-		return;
-	} else if (!lq_sta->pers.drv) {
-		IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
-		return;
-	}
-
-	/* This packet was aggregated but doesn't carry status info */
-	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
-	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
-		return;
-
-	if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
-				    &tx_resp_rate)) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-	/* Disable last tx check if we are debugging with fixed rate but
-	 * update tx stats */
-	if (lq_sta->pers.dbg_fixed_rate) {
-		int index = tx_resp_rate.index;
-		enum rs_column column;
-		int attempts, success;
-
-		column = rs_get_column_from_rate(&tx_resp_rate);
-		if (WARN_ONCE(column == RS_COLUMN_INVALID,
-			      "Can't map rate 0x%x to column",
-			      tx_resp_hwrate))
-			return;
-
-		if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-			attempts = info->status.ampdu_len;
-			success = info->status.ampdu_ack_len;
-		} else {
-			attempts = info->status.rates[0].count;
-			success = !!(info->flags & IEEE80211_TX_STAT_ACK);
-		}
-
-		lq_sta->pers.tx_stats[column][index].total += attempts;
-		lq_sta->pers.tx_stats[column][index].success += success;
-
-		IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
-			       tx_resp_hwrate, success, attempts);
-		return;
-	}
-#endif
-
-	if (time_after(jiffies,
-		       (unsigned long)(lq_sta->last_tx +
-				       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
-		IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
-		iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
-		return;
-	}
-	lq_sta->last_tx = jiffies;
-
-	/* Ignore this Tx frame response if its initial rate doesn't match
-	 * that of latest Link Quality command.  There may be stragglers
-	 * from a previous Link Quality command, but we're no longer interested
-	 * in those; they're either from the "active" mode while we're trying
-	 * to check "search" mode, or a prior "search" mode after we've moved
-	 * to a new "search" mode (which might become the new "active" mode).
-	 */
-	table = &lq_sta->lq;
-	lq_hwrate = le32_to_cpu(table->rs_table[0]);
-	if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
-	/* Here we actually compare this rate to the latest LQ command */
-	if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
-		IWL_DEBUG_RATE(mvm,
-			       "tx resp color 0x%x does not match 0x%x\n",
-			       lq_color, LQ_FLAG_COLOR_GET(table->flags));
-
-		/*
-		 * Since rates mis-match, the last LQ command may have failed.
-		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
-		 * ... driver.
-		 */
-		lq_sta->missed_rate_counter++;
-		if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
-			lq_sta->missed_rate_counter = 0;
-			IWL_DEBUG_RATE(mvm,
-				       "Too many rates mismatch. Send sync LQ. rs_state %d\n",
-				       lq_sta->rs_state);
-			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
-		}
-		/* Regardless, ignore this status info for outdated rate */
-		return;
-	} else
-		/* Rate did match, so reset the missed_rate_counter */
-		lq_sta->missed_rate_counter = 0;
-
-	if (!lq_sta->search_better_tbl) {
-		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-	} else {
-		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-	}
-
-	if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
-		IWL_DEBUG_RATE(mvm,
-			       "Neither active nor search matches tx rate\n");
-		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-		rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
-		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
-		rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
-		rs_dump_rate(mvm, &lq_rate, "ACTUAL");
-
-		/*
-		 * no matching table found, let's by-pass the data collection
-		 * and continue to perform rate scale to find the rate table
-		 */
-		rs_stay_in_table(lq_sta, true);
-		goto done;
-	}
-
-	/*
-	 * Updating the frame history depends on whether packets were
-	 * aggregated.
-	 *
-	 * For aggregation, all packets were transmitted at the same rate, the
-	 * first index into rate scale table.
-	 */
-	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-		rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
-				    info->status.ampdu_len,
-				    info->status.ampdu_ack_len,
-				    reduced_txp);
-
-		/* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
-		 * it as a single frame loss as we don't want the success ratio
-		 * to dip too quickly because a BA wasn't received.
-		 * For TPC, there's no need for this optimisation since we want
-		 * to recover very quickly from a bad power reduction and,
-		 * therefore we'd like the success ratio to get an immediate hit
-		 * when failing to get a BA, so we'd switch back to a lower or
-		 * zero power reduction. When FW transmits agg with a rate
-		 * different from the initial rate, it will not use reduced txp
-		 * and will send BA notification twice (one empty with reduced
-		 * txp equal to the value from LQ and one with reduced txp 0).
-		 * We need to update counters for each txp level accordingly.
-		 */
-		if (info->status.ampdu_ack_len == 0)
-			info->status.ampdu_len = 1;
-
-		rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
-				    info->status.ampdu_len,
-				    info->status.ampdu_ack_len);
-
-		/* Update success/fail counts if not searching for new mode */
-		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
-			lq_sta->total_success += info->status.ampdu_ack_len;
-			lq_sta->total_failed += (info->status.ampdu_len -
-					info->status.ampdu_ack_len);
-		}
-	} else {
-		/* For legacy, update frame history with for each Tx retry. */
-		retries = info->status.rates[0].count - 1;
-		/* HW doesn't send more than 15 retries */
-		retries = min(retries, 15);
-
-		/* The last transmission may have been successful */
-		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
-		/* Collect data for each rate used during failed TX attempts */
-		for (i = 0; i <= retries; ++i) {
-			lq_hwrate = le32_to_cpu(table->rs_table[i]);
-			if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
-						    &lq_rate)) {
-				WARN_ON_ONCE(1);
-				return;
-			}
-
-			/*
-			 * Only collect stats if retried rate is in the same RS
-			 * table as active/search.
-			 */
-			if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
-				tmp_tbl = curr_tbl;
-			else if (rs_rate_column_match(&lq_rate,
-						      &other_tbl->rate))
-				tmp_tbl = other_tbl;
-			else
-				continue;
-
-			rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
-					    tx_resp_rate.index, 1,
-					    i < retries ? 0 : legacy_success,
-					    reduced_txp);
-			rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
-					    tx_resp_rate.index, 1,
-					    i < retries ? 0 : legacy_success);
-		}
-
-		/* Update success/fail counts if not searching for new mode */
-		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
-			lq_sta->total_success += legacy_success;
-			lq_sta->total_failed += retries + (1 - legacy_success);
-		}
-	}
-	/* The last TX rate is cached in lq_sta; it's set in if/else above */
-	lq_sta->last_rate_n_flags = lq_hwrate;
-	IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
-done:
-	/* See if there's a better rate or modulation mode to try. */
-	if (sta->supp_rates[info->band])
-		rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
-}
-
 /*
  * mac80211 sends us Tx status
  */
@@ -1442,8 +1209,9 @@ static void rs_drv_mac80211_tx_status(void *mvm_r,
 	struct iwl_op_mode *op_mode = mvm_r;
 	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
-	if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+	if (!mvmsta->vif)
 		return;
 
 	if (!ieee80211_is_data(hdr->frame_control) ||
@@ -1584,6 +1352,18 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 	tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
 }
 
+/* rs uses two tables, one is active and the second is for searching better
+ * configuration. This function, according to the index of the currently
+ * active table returns the search table, which is located at the
+ * index complementary to 1 according to the active table (active = 1,
+ * search = 0 or active = 0, search = 1).
+ * Since lq_info is an arary of size 2, make sure index cannot be out of bounds.
+ */
+static inline u8 rs_search_tbl(u8 active_tbl)
+{
+	return (active_tbl ^ 1) & 1;
+}
+
 static s32 rs_get_best_rate(struct iwl_mvm *mvm,
 			    struct iwl_lq_sta *lq_sta,
 			    struct iwl_scale_tbl_info *tbl,	/* "search" */
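The helper's `(active_tbl ^ 1) & 1` both flips between the two lq_info slots and masks the result into {0, 1}, so a corrupted index can no longer address past the two-entry array the way the open-coded `1 - lq_sta->active_tbl` could. A standalone check of the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    static inline uint8_t rs_search_tbl(uint8_t active_tbl)
    {
        return (active_tbl ^ 1) & 1;   /* complement, masked into [0, 1] */
    }

    int main(void)
    {
        assert(rs_search_tbl(0) == 1);   /* active 0 -> search 1 */
        assert(rs_search_tbl(1) == 0);   /* active 1 -> search 0 */
        /* even a bogus value can no longer index out of lq_info[2] */
        assert(rs_search_tbl(0xff) <= 1);
        return 0;
    }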
@@ -1794,7 +1574,7 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
 			       struct iwl_scale_tbl_info *tbl)
 {
 	rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
-	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
 }
 
 static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
@@ -1931,9 +1711,9 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
 			       struct ieee80211_sta *sta,
 			       enum rs_column col_id)
 {
-	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
 	struct iwl_scale_tbl_info *search_tbl =
-		&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+		&lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
 	struct rs_rate *rate = &search_tbl->rate;
 	const struct rs_tx_column *column = &rs_tx_columns[col_id];
 	const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
@@ -2341,7 +2121,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 	if (!lq_sta->search_better_tbl)
 		active_tbl = lq_sta->active_tbl;
 	else
-		active_tbl = 1 - lq_sta->active_tbl;
+		active_tbl = rs_search_tbl(lq_sta->active_tbl);
 
 	tbl = &(lq_sta->lq_info[active_tbl]);
 	rate = &tbl->rate;
@@ -2565,7 +2345,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
 	/* If new "search" mode was selected, set up in uCode table */
 	if (lq_sta->search_better_tbl) {
 		/* Access the "search" table, clear its history. */
-		tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+		tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
 		rs_rate_scale_clear_tbl_windows(mvm, tbl);
 
 		/* Use new "search" start rate */
@@ -2896,7 +2676,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
 static void rs_initialize_lq(struct iwl_mvm *mvm,
 			     struct ieee80211_sta *sta,
 			     struct iwl_lq_sta *lq_sta,
-			     enum nl80211_band band, bool update)
+			     enum nl80211_band band)
 {
 	struct iwl_scale_tbl_info *tbl;
 	struct rs_rate *rate;
@@ -2908,7 +2688,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 	if (!lq_sta->search_better_tbl)
 		active_tbl = lq_sta->active_tbl;
 	else
-		active_tbl = 1 - lq_sta->active_tbl;
+		active_tbl = rs_search_tbl(lq_sta->active_tbl);
 
 	tbl = &(lq_sta->lq_info[active_tbl]);
 	rate = &tbl->rate;
@@ -2926,7 +2706,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 	rs_set_expected_tpt_table(lq_sta, tbl);
 	rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
 	/* TODO restore station should remember the lq cmd */
-	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
+	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
 }
 
 static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@@ -3175,7 +2955,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
  * Called after adding a new station to initialize rate scaling
  */
 static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			     enum nl80211_band band, bool update)
+			     enum nl80211_band band)
 {
 	int i, j;
 	struct ieee80211_hw *hw = mvm->hw;
@@ -3186,6 +2966,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	struct ieee80211_supported_band *sband;
 	unsigned long supp; /* must be unsigned long for for_each_set_bit */
 
+	lockdep_assert_held(&mvmsta->lq_sta.rs_drv.pers.lock);
+
 	/* clear all non-persistent lq data */
 	memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
 
@@ -3255,7 +3037,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 	iwl_mvm_reset_frame_stats(mvm);
 #endif
-	rs_initialize_lq(mvm, sta, lq_sta, band, update);
+	rs_initialize_lq(mvm, sta, lq_sta, band);
 }
 
 static void rs_drv_rate_update(void *mvm_r,
@@ -3278,6 +3060,258 @@ static void rs_drv_rate_update(void *mvm_r,
 	iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
 }
 
+static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta,
+				   int tid, struct ieee80211_tx_info *info,
+				   bool ndp)
+{
+	int legacy_success;
+	int retries;
+	int i;
+	struct iwl_lq_cmd *table;
+	u32 lq_hwrate;
+	struct rs_rate lq_rate, tx_resp_rate;
+	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+	u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+	u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
+	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+
+	/* Treat uninitialized rate scaling data same as non-existing. */
+	if (!lq_sta) {
+		IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
+		return;
+	} else if (!lq_sta->pers.drv) {
+		IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+		return;
+	}
+
+	/* This packet was aggregated but doesn't carry status info */
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
+		return;
+
+	if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+				    &tx_resp_rate)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* Disable last tx check if we are debugging with fixed rate but
+	 * update tx stats
+	 */
+	if (lq_sta->pers.dbg_fixed_rate) {
+		int index = tx_resp_rate.index;
+		enum rs_column column;
+		int attempts, success;
+
+		column = rs_get_column_from_rate(&tx_resp_rate);
+		if (WARN_ONCE(column == RS_COLUMN_INVALID,
+			      "Can't map rate 0x%x to column",
+			      tx_resp_hwrate))
+			return;
+
+		if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+			attempts = info->status.ampdu_len;
+			success = info->status.ampdu_ack_len;
+		} else {
+			attempts = info->status.rates[0].count;
+			success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+		}
+
+		lq_sta->pers.tx_stats[column][index].total += attempts;
+		lq_sta->pers.tx_stats[column][index].success += success;
+
+		IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
+			       tx_resp_hwrate, success, attempts);
+		return;
+	}
+#endif
+
+	if (time_after(jiffies,
+		       (unsigned long)(lq_sta->last_tx +
+				       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
+		IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
+		/* reach here only in case of driver RS, call directly
+		 * the unlocked version
+		 */
+		rs_drv_rate_init(mvm, sta, info->band);
+		return;
+	}
+	lq_sta->last_tx = jiffies;
+
+	/* Ignore this Tx frame response if its initial rate doesn't match
+	 * that of latest Link Quality command.  There may be stragglers
+	 * from a previous Link Quality command, but we're no longer interested
+	 * in those; they're either from the "active" mode while we're trying
+	 * to check "search" mode, or a prior "search" mode after we've moved
+	 * to a new "search" mode (which might become the new "active" mode).
+	 */
+	table = &lq_sta->lq;
+	lq_hwrate = le32_to_cpu(table->rs_table[0]);
+	if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	/* Here we actually compare this rate to the latest LQ command */
+	if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
+		IWL_DEBUG_RATE(mvm,
+			       "tx resp color 0x%x does not match 0x%x\n",
+			       lq_color, LQ_FLAG_COLOR_GET(table->flags));
+
+		/* Since rates mis-match, the last LQ command may have failed.
+		 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
+		 * ... driver.
+		 */
+		lq_sta->missed_rate_counter++;
+		if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
+			lq_sta->missed_rate_counter = 0;
+			IWL_DEBUG_RATE(mvm,
+				       "Too many rates mismatch. Send sync LQ. rs_state %d\n",
+				       lq_sta->rs_state);
+			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
+		}
+		/* Regardless, ignore this status info for outdated rate */
+		return;
+	}
+
+	/* Rate did match, so reset the missed_rate_counter */
+	lq_sta->missed_rate_counter = 0;
+
+	if (!lq_sta->search_better_tbl) {
+		curr_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+		other_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+	} else {
+		curr_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+		other_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+	}
+
+	if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
+		IWL_DEBUG_RATE(mvm,
+			       "Neither active nor search matches tx rate\n");
+		tmp_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+		rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
+		tmp_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+		rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+		rs_dump_rate(mvm, &lq_rate, "ACTUAL");
+
+		/* no matching table found, let's by-pass the data collection
+		 * and continue to perform rate scale to find the rate table
+		 */
+		rs_stay_in_table(lq_sta, true);
+		goto done;
+	}
+
+	/* Updating the frame history depends on whether packets were
+	 * aggregated.
+	 *
+	 * For aggregation, all packets were transmitted at the same rate, the
+	 * first index into rate scale table.
+	 */
+	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+		rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
+				    info->status.ampdu_len,
+				    info->status.ampdu_ack_len,
+				    reduced_txp);
+
+		/* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
+		 * it as a single frame loss as we don't want the success ratio
+		 * to dip too quickly because a BA wasn't received.
+		 * For TPC, there's no need for this optimisation since we want
+		 * to recover very quickly from a bad power reduction and,
+		 * therefore we'd like the success ratio to get an immediate hit
+		 * when failing to get a BA, so we'd switch back to a lower or
+		 * zero power reduction. When FW transmits agg with a rate
+		 * different from the initial rate, it will not use reduced txp
+		 * and will send BA notification twice (one empty with reduced
+		 * txp equal to the value from LQ and one with reduced txp 0).
+		 * We need to update counters for each txp level accordingly.
+		 */
+		if (info->status.ampdu_ack_len == 0)
+			info->status.ampdu_len = 1;
+
+		rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl,
+				    tx_resp_rate.index,
+				    info->status.ampdu_len,
+				    info->status.ampdu_ack_len);
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+			lq_sta->total_success += info->status.ampdu_ack_len;
+			lq_sta->total_failed += (info->status.ampdu_len -
+					info->status.ampdu_ack_len);
+		}
+	} else {
+		/* For legacy, update frame history with for each Tx retry. */
+		retries = info->status.rates[0].count - 1;
+		/* HW doesn't send more than 15 retries */
+		retries = min(retries, 15);
+
+		/* The last transmission may have been successful */
+		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+		/* Collect data for each rate used during failed TX attempts */
+		for (i = 0; i <= retries; ++i) {
+			lq_hwrate = le32_to_cpu(table->rs_table[i]);
+			if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+						    &lq_rate)) {
+				WARN_ON_ONCE(1);
+				return;
+			}
+
+			/* Only collect stats if retried rate is in the same RS
+			 * table as active/search.
+			 */
+			if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
+				tmp_tbl = curr_tbl;
+			else if (rs_rate_column_match(&lq_rate,
+						      &other_tbl->rate))
+				tmp_tbl = other_tbl;
+			else
+				continue;
+
+			rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
+					    tx_resp_rate.index, 1,
+					    i < retries ? 0 : legacy_success,
+					    reduced_txp);
+			rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
+					    tx_resp_rate.index, 1,
+					    i < retries ? 0 : legacy_success);
+		}
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+			lq_sta->total_success += legacy_success;
+			lq_sta->total_failed += retries + (1 - legacy_success);
+		}
+	}
+	/* The last TX rate is cached in lq_sta; it's set in if/else above */
+	lq_sta->last_rate_n_flags = lq_hwrate;
+	IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
+done:
+	/* See if there's a better rate or modulation mode to try. */
+	if (sta->supp_rates[info->band])
+		rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
+}
+
+void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  int tid, struct ieee80211_tx_info *info, bool ndp)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	/* If it's locked we are in middle of init flow
+	 * just wait for next tx status to update the lq_sta data
+	 */
+	if (!spin_trylock(&mvmsta->lq_sta.rs_drv.pers.lock))
+		return;
+
+	__iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp);
+	spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
+}
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
 					    struct iwl_lq_cmd *lq_cmd,
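The wrapper takes pers.lock with spin_trylock() rather than spin_lock(): if rate-scaling init/update currently holds the lock, this Tx status is deliberately dropped instead of spinning, since the next status will refresh lq_sta anyway. A userspace sketch of that drop-on-contention pattern (pthread_mutex_trylock() standing in for spin_trylock()):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long tx_success;

    /* Hot path: skip the update instead of blocking while init holds the lock. */
    static void tx_status(int acked)
    {
        if (pthread_mutex_trylock(&pers_lock) != 0)
            return;   /* contended: drop this sample, the next one will do */

        if (acked)
            tx_success++;
        pthread_mutex_unlock(&pers_lock);
    }

    int main(void)
    {
        tx_status(1);
        printf("successes: %lu\n", tx_success);
        return 0;
    }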
@@ -3569,7 +3603,7 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
 
 		bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
 		bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
-		iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
+		iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd);
 
 		ss_params |= LQ_SS_BFER_ALLOWED;
 		IWL_DEBUG_RATE(mvm,
@@ -3735,7 +3769,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
 
 	if (lq_sta->pers.dbg_fixed_rate) {
 		rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
-		iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
+		iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq);
 	}
 }
 
@@ -4132,10 +4166,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
 void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 			  enum nl80211_band band, bool update)
 {
-	if (iwl_mvm_has_tlc_offload(mvm))
+	if (iwl_mvm_has_tlc_offload(mvm)) {
 		rs_fw_rate_init(mvm, sta, band, update);
-	else
-		rs_drv_rate_init(mvm, sta, band, update);
+	} else {
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		spin_lock(&mvmsta->lq_sta.rs_drv.pers.lock);
+		rs_drv_rate_init(mvm, sta, band);
+		spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
+	}
 }
 
 int iwl_mvm_rate_control_register(void)
@@ -4165,7 +4204,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 		lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
 	}
 
-	return iwl_mvm_send_lq_cmd(mvm, lq, false);
+	return iwl_mvm_send_lq_cmd(mvm, lq);
 }
 
 /**
@@ -4,7 +4,7 @@
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
  *
  * Contact Information:
  *  Intel Linux Wireless <linuxwifi@intel.com>
@@ -390,6 +390,7 @@ struct iwl_lq_sta {
 		s8 last_rssi;
 		struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
 		struct iwl_mvm *drv;
+		spinlock_t lock; /* for races in reinit/update table */
 	} pers;
 };
 
@@ -463,20 +463,22 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
 }
 
 int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
-			    const u8 *data, u32 count)
+			    const u8 *data, u32 count, bool async)
 {
-	struct iwl_rxq_sync_cmd *cmd;
+	u8 buf[sizeof(struct iwl_rxq_sync_cmd) +
+	       sizeof(struct iwl_mvm_rss_sync_notif)];
+	struct iwl_rxq_sync_cmd *cmd = (void *)buf;
 	u32 data_size = sizeof(*cmd) + count;
 	int ret;
 
-	/* should be DWORD aligned */
-	if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
+	/*
+	 * size must be a multiple of DWORD
+	 * Ensure we don't overflow buf
+	 */
+	if (WARN_ON(count & 3 ||
+		    count > sizeof(struct iwl_mvm_rss_sync_notif)))
 		return -EINVAL;
 
-	cmd = kzalloc(data_size, GFP_KERNEL);
-	if (!cmd)
-		return -ENOMEM;
-
 	cmd->rxq_mask = cpu_to_le32(rxq_mask);
 	cmd->count = cpu_to_le32(count);
 	cmd->flags = 0;
@@ -485,9 +487,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
 	ret = iwl_mvm_send_cmd_pdu(mvm,
 				   WIDE_ID(DATA_PATH_GROUP,
 					   TRIGGER_RX_QUEUES_NOTIF_CMD),
-				   0, data_size, cmd);
+				   async ? CMD_ASYNC : 0, data_size, cmd);
 
-	kfree(cmd);
 	return ret;
 }
 
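Moving from kzalloc(..., GFP_KERNEL) to an on-stack buffer appears motivated by the new NSSN sync firing from the RX path, where a sleeping allocation is not allowed; CMD_ASYNC likewise keeps the send non-blocking there. Sizing the buffer as header plus the largest payload is what lets the WARN_ON bound `count`. A sketch of the sizing idea, using stand-in types (the real layouts live in the fw API headers):

    #include <stdint.h>
    #include <string.h>

    /* Stand-ins for the driver structs, for illustration only. */
    struct sync_cmd_hdr { uint32_t rxq_mask, count, flags; };
    struct sync_payload { uint8_t data[16]; };

    int notify_rx_queues(const void *data, uint32_t count)
    {
        /* header + worst-case payload: no runtime allocation needed */
        uint8_t buf[sizeof(struct sync_cmd_hdr) + sizeof(struct sync_payload)];
        struct sync_cmd_hdr *cmd = (void *)buf;

        if ((count & 3) || count > sizeof(struct sync_payload))
            return -1;   /* not DWORD-sized, or would overflow buf */

        cmd->count = count;
        memcpy(buf + sizeof(*cmd), data, count);
        /* ...hand buf to the (async) command layer, which copies it... */
        return 0;
    }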
@@ -503,14 +504,31 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
 	       !ieee80211_sn_less(sn1, sn2 - buffer_size);
 }
 
+static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
+{
+	struct iwl_mvm_rss_sync_notif notif = {
+		.metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+		.metadata.sync = 0,
+		.nssn_sync.baid = baid,
+		.nssn_sync.nssn = nssn,
+	};
+
+	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+}
+
 #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
 
+enum iwl_mvm_release_flags {
+	IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0),
+	IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1),
+};
+
 static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
 				   struct ieee80211_sta *sta,
 				   struct napi_struct *napi,
 				   struct iwl_mvm_baid_data *baid_data,
 				   struct iwl_mvm_reorder_buffer *reorder_buf,
-				   u16 nssn)
+				   u16 nssn, u32 flags)
 {
 	struct iwl_mvm_reorder_buf_entry *entries =
 		&baid_data->entries[reorder_buf->queue *
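The 0/2048 crossings matter because 802.11 sequence numbers live in a 12-bit space: ordering is only defined modulo 4096 with a half-window of 2048, which is how a queue receiving a sync can distinguish a fresh NSSN from one that is actually behind it. A standalone version of the comparison (mirroring mac80211's ieee80211_sn_less()):

    #include <assert.h>
    #include <stdint.h>

    #define SN_MASK 0x0fff   /* 12-bit sequence number space */
    #define SN_MAX  0x0fff

    /* sn1 < sn2 modulo 4096, treating a gap above 2048 as wraparound */
    static int sn_less(uint16_t sn1, uint16_t sn2)
    {
        return ((sn1 - sn2) & SN_MASK) > (SN_MAX >> 1);
    }

    int main(void)
    {
        assert(sn_less(10, 20));     /* plainly behind */
        assert(!sn_less(20, 10));
        assert(sn_less(4090, 5));    /* behind across the 4095 -> 0 wrap */
        return 0;
    }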
@@ -519,6 +537,18 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
 
 	lockdep_assert_held(&reorder_buf->lock);
 
+	/*
+	 * We keep the NSSN not too far behind, if we are sync'ing it and it
+	 * is more than 2048 ahead of us, it must be behind us. Discard it.
+	 * This can happen if the queue that hit the 0 / 2048 seqno was lagging
+	 * behind and this queue already processed packets. The next if
+	 * would have caught cases where this queue would have processed less
+	 * than 64 packets, but it may have processed more than 64 packets.
+	 */
+	if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) &&
+	    ieee80211_sn_less(nssn, ssn))
+		goto set_timer;
+
 	/* ignore nssn smaller than head sn - this can happen due to timeout */
 	if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
 		goto set_timer;
@@ -529,6 +559,9 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
 		struct sk_buff *skb;
 
 		ssn = ieee80211_sn_inc(ssn);
+		if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) &&
+		    (ssn == 2048 || ssn == 0))
+			iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn);
 
 		/*
 		 * Empty the list. Will have more than one frame for A-MSDU.
|
||||||
sta_id, sn);
|
sta_id, sn);
|
||||||
iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
|
iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
|
||||||
sta, baid_data->tid);
|
sta, baid_data->tid);
|
||||||
iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
|
iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
|
||||||
|
buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
|
@@ -657,7 +691,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
 	spin_lock_bh(&reorder_buf->lock);
 	iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
 			       ieee80211_sn_add(reorder_buf->head_sn,
 						reorder_buf->buf_size),
-			       reorder_buf->buf_size));
+			       0);
 	spin_unlock_bh(&reorder_buf->lock);
 	del_timer_sync(&reorder_buf->reorder_timer);
 
@@ -665,8 +700,54 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
 	rcu_read_unlock();
 }
 
-void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
-			    int queue)
+static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
+					      struct napi_struct *napi,
+					      u8 baid, u16 nssn, int queue,
+					      u32 flags)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_reorder_buffer *reorder_buf;
+	struct iwl_mvm_baid_data *ba_data;
+
+	IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
+		     baid, nssn);
+
+	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+			 baid >= ARRAY_SIZE(mvm->baid_map)))
+		return;
+
+	rcu_read_lock();
+
+	ba_data = rcu_dereference(mvm->baid_map[baid]);
+	if (WARN_ON_ONCE(!ba_data))
+		goto out;
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		goto out;
+
+	reorder_buf = &ba_data->reorder_buf[queue];
+
+	spin_lock_bh(&reorder_buf->lock);
+	iwl_mvm_release_frames(mvm, sta, napi, ba_data,
+			       reorder_buf, nssn, flags);
+	spin_unlock_bh(&reorder_buf->lock);
+
+out:
+	rcu_read_unlock();
+}
+
+static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
+			      struct napi_struct *napi, int queue,
+			      const struct iwl_mvm_nssn_sync_data *data)
+{
+	iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
+					  data->nssn, queue,
+					  IWL_MVM_RELEASE_FROM_RSS_SYNC);
+}
+
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
+			    struct iwl_rx_cmd_buffer *rxb, int queue)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rxq_sync_notification *notif;
@@ -687,6 +768,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
 	case IWL_MVM_RXQ_NOTIF_DEL_BA:
 		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
 		break;
+	case IWL_MVM_RXQ_NSSN_SYNC:
+		iwl_mvm_nssn_sync(mvm, napi, queue,
+				  (void *)internal_notif->data);
+		break;
 	default:
 		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
 	}
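The receive path above dispatches IWL_MVM_RXQ_NSSN_SYNC, but the sender, iwl_mvm_sync_nssn(), is not part of the quoted hunks. A sketch of a plausible sender side, assuming the existing iwl_mvm_sync_rx_queues_internal() helper and the iwl_mvm_rss_sync_notif layout introduced in the header hunk further down:

/* Sketch, not the verbatim patch: broadcast the new NSSN to all the
 * RSS queues through the internal RXQ sync notification.
 */
void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
{
	struct iwl_mvm_rss_sync_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
		.metadata.sync = 0,		/* fire and forget */
		.nssn_sync.baid = baid,
		.nssn_sync.nssn = nssn,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}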
@@ -785,7 +870,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 	}
 
 	if (ieee80211_is_back_req(hdr->frame_control)) {
-		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+		iwl_mvm_release_frames(mvm, sta, napi, baid_data,
+				       buffer, nssn, 0);
 		goto drop;
 	}
 
@@ -794,7 +880,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 	 * If the SN is smaller than the NSSN it might need to first go into
 	 * the reorder buffer, in which case we just release up to it and the
 	 * rest of the function will take care of storing it and releasing up to
-	 * the nssn
+	 * the nssn.
+	 * This should not happen. This queue has been lagging and it should
+	 * have been updated by an IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
+	 * and update the other queues.
 	 */
 	if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
 				buffer->buf_size) ||
@@ -802,7 +891,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 		u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
 
 		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
-				       min_sn);
+				       min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
 	}
 
 	/* drop any outdated packets */
@@ -813,8 +902,23 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
 		if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
 				       buffer->buf_size) &&
-		    (!amsdu || last_subframe))
+		    (!amsdu || last_subframe)) {
+			/*
+			 * If we crossed the 2048 or 0 SN, notify all the
+			 * queues. This is done in order to avoid having a
+			 * head_sn that lags behind for too long. When that
+			 * happens, we can get to a situation where the head_sn
+			 * is within the interval [nssn - buf_size : nssn]
+			 * which will make us think that the nssn is a packet
+			 * that we already freed because of the reordering
+			 * buffer and we will ignore it. So maintain the
+			 * head_sn somewhat updated across all the queues:
+			 * when it crosses 0 and 2048.
+			 */
+			if (sn == 2048 || sn == 0)
+				iwl_mvm_sync_nssn(mvm, baid, sn);
 			buffer->head_sn = nssn;
+		}
 		/* No need to update AMSDU last SN - we are moving the head */
 		spin_unlock_bh(&buffer->lock);
 		return false;
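The interval mentioned in the comment above is exactly the window the buffer-size-aware comparison treats as "already released". Continuing the earlier sketch, and again modeled on the driver's iwl_mvm_is_sn_less as an assumption rather than a quote:

/* True if sn1 lies in the buffer_size-wide window just behind sn2.
 * The release path uses this to ignore an NSSN slightly behind
 * head_sn, since those frames were already released. A queue whose
 * head_sn lags too far can misclassify a valid NSSN this way, which
 * is what the 0/2048 sync broadcasts guard against.
 */
static bool is_sn_less(uint16_t sn1, uint16_t sn2, uint16_t buffer_size)
{
	return sn_less(sn1, sn2) && !sn_less(sn1, sn2 - buffer_size);
}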
@@ -829,8 +933,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 	 * while technically there is no hole and we can move forward.
 	 */
 	if (!buffer->num_stored && sn == buffer->head_sn) {
-		if (!amsdu || last_subframe)
+		if (!amsdu || last_subframe) {
+			if (sn == 2048 || sn == 0)
+				iwl_mvm_sync_nssn(mvm, baid, sn);
 			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+		}
 		/* No need to update AMSDU last SN - we are moving the head */
 		spin_unlock_bh(&buffer->lock);
 		return false;
@@ -875,7 +982,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 	 * release notification with up to date NSSN.
 	 */
 	if (!amsdu || last_subframe)
-		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+		iwl_mvm_release_frames(mvm, sta, napi, baid_data,
+				       buffer, nssn,
+				       IWL_MVM_RELEASE_SEND_RSS_SYNC);
 
 	spin_unlock_bh(&buffer->lock);
 	return true;
@@ -1840,40 +1949,14 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
 out:
 	rcu_read_unlock();
 }
 
 void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
 			      struct iwl_rx_cmd_buffer *rxb, int queue)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_frame_release *release = (void *)pkt->data;
-	struct ieee80211_sta *sta;
-	struct iwl_mvm_reorder_buffer *reorder_buf;
-	struct iwl_mvm_baid_data *ba_data;
-	int baid = release->baid;
-
-	IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
-		     release->baid, le16_to_cpu(release->nssn));
-
-	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
-		return;
-
-	rcu_read_lock();
-
-	ba_data = rcu_dereference(mvm->baid_map[baid]);
-	if (WARN_ON_ONCE(!ba_data))
-		goto out;
-
-	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
-	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
-		goto out;
-
-	reorder_buf = &ba_data->reorder_buf[queue];
-
-	spin_lock_bh(&reorder_buf->lock);
-	iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
-			       le16_to_cpu(release->nssn));
-	spin_unlock_bh(&reorder_buf->lock);
-
-out:
-	rcu_read_unlock();
+
+	iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
+					  le16_to_cpu(release->nssn),
+					  queue, 0);
 }
@@ -1684,6 +1684,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 	 */
 	if (iwl_mvm_has_tlc_offload(mvm))
 		iwl_mvm_rs_add_sta(mvm, mvm_sta);
+	else
+		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
 
 	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
 
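The else branch ties into the rate-scale locking fixes: when rate scaling runs in the driver (no TLC offload), its per-station state is touched from atomic contexts, so the lock must exist as soon as the station does. A hedged sketch of the consumer side, where rs_update_example is a hypothetical name, not a function in the driver:

/* Illustrative only: how a driver-side rate-scale update would use the
 * lock initialized above. Inside a spinlock, any LQ command must be
 * fire-and-forget, which is why iwl_mvm_send_lq_cmd() (see the
 * utils.c hunk below) now always uses CMD_ASYNC: a synchronous command
 * would sleep while the lock is held.
 */
static void rs_update_example(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
{
	spin_lock_bh(&mvmsta->lq_sta.rs_drv.pers.lock);
	/* ... recompute rates, update lq_sta.rs_drv state ... */
	iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
	spin_unlock_bh(&mvmsta->lq_sta.rs_drv.pers.lock);
}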
@@ -2421,7 +2423,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
 {
-	struct iwl_mvm_delba_notif notif = {
+	struct iwl_mvm_rss_sync_notif notif = {
 		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
 		.metadata.sync = 1,
 		.delba.baid = baid,
@@ -2972,7 +2974,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
 		     sta->addr, tid);
 
-	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
+	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
 }
 
 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
@@ -343,9 +343,17 @@ struct iwl_mvm_delba_data {
 	u32 baid;
 } __packed;
 
-struct iwl_mvm_delba_notif {
+struct iwl_mvm_nssn_sync_data {
+	u32 baid;
+	u32 nssn;
+} __packed;
+
+struct iwl_mvm_rss_sync_notif {
 	struct iwl_mvm_internal_rxq_notif metadata;
-	struct iwl_mvm_delba_data delba;
+	union {
+		struct iwl_mvm_delba_data delba;
+		struct iwl_mvm_nssn_sync_data nssn_sync;
+	};
 } __packed;
 
 /**
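The anonymous union adopts the size of its largest member, so a sender of either payload allocates the same notification size. A compile-time check one could add, shown with C11 static_assert purely as an illustration, not as part of the patch:

/* Illustrative only, not from the patch: with the __packed layouts
 * above, the notification is the metadata header plus the largest
 * union member (nssn_sync, two u32s), so either payload fits.
 */
static_assert(sizeof(struct iwl_mvm_rss_sync_notif) ==
	      sizeof(struct iwl_mvm_internal_rxq_notif) +
	      sizeof(struct iwl_mvm_nssn_sync_data),
	      "rss sync notif must be sized by its largest payload");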
@@ -831,6 +831,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
 	unsigned int tcp_payload_len;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
 	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+	bool qos = ieee80211_is_data_qos(hdr->frame_control);
 	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
 
 	skb_shinfo(skb)->gso_size = num_subframes * mss;
|
||||||
if (tcp_payload_len > mss) {
|
if (tcp_payload_len > mss) {
|
||||||
skb_shinfo(tmp)->gso_size = mss;
|
skb_shinfo(tmp)->gso_size = mss;
|
||||||
} else {
|
} else {
|
||||||
if (ieee80211_is_data_qos(hdr->frame_control)) {
|
if (qos) {
|
||||||
u8 *qc;
|
u8 *qc;
|
||||||
|
|
||||||
if (ipv4)
|
if (ipv4)
|
||||||
|
|
|
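Why hoist the QoS test? A plausible reading (hedged, inferred from the code rather than quoted from the commit message): segmentation consumes the original skb, and hdr points into that skb's header, so reading hdr->frame_control per output segment risks a use-after-free. The hoisted bool captures the bit while the skb is still valid:

/* Sketch of the hazard (illustrative, not driver code):
 *
 *	struct ieee80211_hdr *hdr = (void *)skb->data;
 *	segs = skb_gso_segment(skb, ...);
 *	consume_skb(skb);				// original skb gone
 *	...
 *	if (ieee80211_is_data_qos(hdr->frame_control))	// dangling hdr
 *
 * Reading the QoS bit once, up front, avoids dereferencing hdr after
 * the skb it points into may have been freed.
 */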
@@ -653,12 +653,12 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
  * this case to clear the state indicating that station creation is in
  * progress.
  */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
 {
 	struct iwl_host_cmd cmd = {
 		.id = LQ_CMD,
 		.len = { sizeof(struct iwl_lq_cmd), },
-		.flags = sync ? 0 : CMD_ASYNC,
+		.flags = CMD_ASYNC,
 		.data = { lq, },
 	};
@@ -604,10 +604,13 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
 	{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
@@ -435,6 +435,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 					 DMA_TO_DEVICE);
 	}
 
+	meta->tbs = 0;
+
 	if (trans->cfg->use_tfh) {
 		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
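A hedged reading of the one-line pcie fix above, inferred from the surrounding code rather than the commit message:

/* The unmap loop in iwl_pcie_tfd_unmap() consults meta->tbs to pick
 * dma_unmap_page() vs dma_unmap_single() per TB. Leaving a stale
 * bitmap behind lets a later pass over a reused descriptor unmap with
 * the wrong type, or unmap the same DMA address twice. Zeroing it
 * keeps the meta bookkeeping consistent with the TFD, whose own TB
 * count the code just below resets.
 */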