mirror of https://gitee.com/openkylin/linux.git
iwlwifi: mvm: let any command flag be passed to iwl_mvm_flush_tx_path()
Instead of only allowing the caller to decide whether the CMD_ASYNC flag is set, let it pass the entire flags bitmask. This allows more flexibility and will be needed when we call this function in the suspend flow (where other flags are needed). Signed-off-by: Luca Coelho <luciano.coelho@intel.com> Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
This commit is contained in:
parent
c84af35de6
commit
5888a40c50
|
@@ -85,7 +85,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
|
||||||
IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
|
IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
|
||||||
|
|
||||||
mutex_lock(&mvm->mutex);
|
mutex_lock(&mvm->mutex);
|
||||||
ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
|
ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
|
||||||
mutex_unlock(&mvm->mutex);
|
mutex_unlock(&mvm->mutex);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
|
|
|
@@ -1781,7 +1781,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
|
||||||
* Flush them here.
|
* Flush them here.
|
||||||
*/
|
*/
|
||||||
mutex_lock(&mvm->mutex);
|
mutex_lock(&mvm->mutex);
|
||||||
iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
|
iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
|
||||||
mutex_unlock(&mvm->mutex);
|
mutex_unlock(&mvm->mutex);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -3924,7 +3924,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
|
||||||
}
|
}
|
||||||
|
|
||||||
if (drop) {
|
if (drop) {
|
||||||
if (iwl_mvm_flush_tx_path(mvm, msk, true))
|
if (iwl_mvm_flush_tx_path(mvm, msk, 0))
|
||||||
IWL_ERR(mvm, "flush request fail\n");
|
IWL_ERR(mvm, "flush request fail\n");
|
||||||
mutex_unlock(&mvm->mutex);
|
mutex_unlock(&mvm->mutex);
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@@ -1031,7 +1031,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
|
||||||
#else
|
#else
|
||||||
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
|
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
|
||||||
#endif
|
#endif
|
||||||
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
|
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
|
||||||
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
|
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
|
||||||
|
|
||||||
static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
|
static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
|
||||||
|
|
|
@@ -501,7 +501,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
/* flush its queues here since we are freeing mvm_sta */
|
/* flush its queues here since we are freeing mvm_sta */
|
||||||
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
|
ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
|
ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
|
||||||
|
@@ -1155,7 +1155,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
|
||||||
|
|
||||||
if (old_state >= IWL_AGG_ON) {
|
if (old_state >= IWL_AGG_ON) {
|
||||||
iwl_mvm_drain_sta(mvm, mvmsta, true);
|
iwl_mvm_drain_sta(mvm, mvmsta, true);
|
||||||
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
|
if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
|
||||||
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
|
IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
|
||||||
iwl_trans_wait_tx_queue_empty(mvm->trans,
|
iwl_trans_wait_tx_queue_empty(mvm->trans,
|
||||||
mvmsta->tfd_queue_msk);
|
mvmsta->tfd_queue_msk);
|
||||||
|
|
|
@@ -129,7 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
|
||||||
* issue as it will have to complete before the next command is
|
* issue as it will have to complete before the next command is
|
||||||
* executed, and a new time event means a new command.
|
* executed, and a new time event means a new command.
|
||||||
*/
|
*/
|
||||||
iwl_mvm_flush_tx_path(mvm, queues, false);
|
iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
|
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
|
||||||
|
|
|
@@ -1099,7 +1099,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
|
||||||
* 2) flush the Tx path
|
* 2) flush the Tx path
|
||||||
* 3) wait for the transport queues to be empty
|
* 3) wait for the transport queues to be empty
|
||||||
*/
|
*/
|
||||||
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
|
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
struct iwl_tx_path_flush_cmd flush_cmd = {
|
struct iwl_tx_path_flush_cmd flush_cmd = {
|
||||||
|
@@ -1107,8 +1107,6 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
|
||||||
.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
|
.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
|
||||||
};
|
};
|
||||||
|
|
||||||
u32 flags = sync ? 0 : CMD_ASYNC;
|
|
||||||
|
|
||||||
ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
|
ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
|
||||||
sizeof(flush_cmd), &flush_cmd);
|
sizeof(flush_cmd), &flush_cmd);
|
||||||
if (ret)
|
if (ret)
|
||||||
|
|
Loading…
Reference in New Issue