iwlwifi: mvm: let any command flag be passed to iwl_mvm_flush_tx_path()

Instead of only allowing the caller to decide whether the CMD_ASYNC
flag is set, let it pass the entire flags bitmask.  This allows more
flexibility and will be needed when we call this function in the
suspend flow (where other flags are needed).

Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
commit 5888a40c50 (parent c84af35de6)
Author: Luca Coelho <luciano.coelho@intel.com>
Date:   2015-10-06 09:54:57 +03:00
Commit: Emmanuel Grumbach <emmanuel.grumbach@intel.com>

6 files changed, 8 insertions(+), 10 deletions(-)
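At a call site the change looks like this (an illustrative sketch, not part of the commit itself; mvm and tfd_msk are the arguments used in the hunks below):

	/* before: a bool chose between a sync command and CMD_ASYNC */
	iwl_mvm_flush_tx_path(mvm, tfd_msk, true);	/* sync */
	iwl_mvm_flush_tx_path(mvm, tfd_msk, false);	/* async */

	/* after: the caller passes the command flags bitmask directly,
	 * so further flags can be ORed in (e.g. in the suspend flow) */
	iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);		/* sync */
	iwl_mvm_flush_tx_path(mvm, tfd_msk, CMD_ASYNC);	/* async */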

--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c

@@ -85,7 +85,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
 	IWL_ERR(mvm, "FLUSHING queues: scd_q_msk = 0x%x\n", scd_q_msk);
 
 	mutex_lock(&mvm->mutex);
-	ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, true) ? : count;
+	ret = iwl_mvm_flush_tx_path(mvm, scd_q_msk, 0) ? : count;
 	mutex_unlock(&mvm->mutex);
 
 	return ret;

--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c

@@ -1781,7 +1781,7 @@ static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
 	 * Flush them here.
 	 */
 	mutex_lock(&mvm->mutex);
-	iwl_mvm_flush_tx_path(mvm, tfd_msk, true);
+	iwl_mvm_flush_tx_path(mvm, tfd_msk, 0);
 	mutex_unlock(&mvm->mutex);
 
 	/*
@@ -3924,7 +3924,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 	}
 
 	if (drop) {
-		if (iwl_mvm_flush_tx_path(mvm, msk, true))
+		if (iwl_mvm_flush_tx_path(mvm, msk, 0))
 			IWL_ERR(mvm, "flush request fail\n");
 		mutex_unlock(&mvm->mutex);
 	} else {

--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h

@@ -1031,7 +1031,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
 #else
 static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
 #endif
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
 void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
 
 static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,

--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c

@@ -501,7 +501,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		if (ret)
 			return ret;
 
 		/* flush its queues here since we are freeing mvm_sta */
-		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
 		if (ret)
 			return ret;
 		ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
@@ -1155,7 +1155,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	if (old_state >= IWL_AGG_ON) {
 		iwl_mvm_drain_sta(mvm, mvmsta, true);
-		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
 			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
 		iwl_trans_wait_tx_queue_empty(mvm->trans,
 					      mvmsta->tfd_queue_msk);

--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c

@@ -129,7 +129,7 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
 	 * issue as it will have to complete before the next command is
 	 * executed, and a new time event means a new command.
 	 */
-	iwl_mvm_flush_tx_path(mvm, queues, false);
+	iwl_mvm_flush_tx_path(mvm, queues, CMD_ASYNC);
 }
 
 static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)

--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c

@@ -1099,7 +1099,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
  * 2) flush the Tx path
  * 3) wait for the transport queues to be empty
  */
-int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
 {
 	int ret;
 	struct iwl_tx_path_flush_cmd flush_cmd = {
@@ -1107,8 +1107,6 @@ int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync)
 		.queues_ctl = cpu_to_le32(tfd_msk),
 		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
 	};
-	u32 flags = sync ? 0 : CMD_ASYNC;
-
 	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
 				   sizeof(flush_cmd), &flush_cmd);
 	if (ret)