ath10k: rework tx queue locking

Tx queue locking has been very simple until now.
Multi-channel support will require more flexible
and fine-grained control.

This introduces per-hw and per-vif tx queue locking,
each controlled by a bitmask of pause reasons.

Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
Authored by Michal Kazior on 2015-03-31 10:26:23 +00:00; committed by Kalle Valo
parent 5b272e30a4
commit 96d828d45e
4 changed files with 101 additions and 4 deletions
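
The scheme introduced below boils down to two bitmasks: a hw-wide pause mask (ar->tx_paused) and a per-vif pause mask (arvif->tx_paused). A vif's tx queue is stopped whenever either mask is non-zero and is only woken once both are clear. What follows is a minimal, self-contained userspace sketch of that semantics, not driver code; every name prefixed with sim_ is invented for the illustration, and only the locking logic mirrors ath10k_mac_tx_lock()/ath10k_mac_tx_unlock() and their per-vif variants in the diff.

/* Minimal userspace model of the reason-bitmask tx queue locking.
 * Illustration only; this is not ath10k code. All sim_* names are
 * invented for the example.
 */
#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

enum sim_tx_pause_reason {
        SIM_TX_PAUSE_Q_FULL,
        SIM_TX_PAUSE_MAX,
};

struct sim_vif {
        int vdev_id;
        unsigned long tx_paused;        /* per-vif pause reasons */
        bool queue_stopped;             /* models the mac80211 hw queue state */
};

struct sim_hw {
        unsigned long tx_paused;        /* hw-wide pause reasons */
        struct sim_vif vif;
};

/* hw-level pause: stops the queue regardless of per-vif state */
static void sim_tx_lock(struct sim_hw *hw, int reason)
{
        hw->tx_paused |= BIT(reason);
        hw->vif.queue_stopped = true;
}

/* hw-level unpause: the vif queue wakes only if it has no reasons of its own */
static void sim_tx_unlock(struct sim_hw *hw, int reason)
{
        hw->tx_paused &= ~BIT(reason);
        if (hw->tx_paused)
                return;
        if (hw->vif.tx_paused)
                return;
        hw->vif.queue_stopped = false;
}

/* per-vif pause/unpause: an unpause is ignored while any hw-level reason is set */
static void sim_vif_tx_lock(struct sim_hw *hw, int reason)
{
        hw->vif.tx_paused |= BIT(reason);
        hw->vif.queue_stopped = true;
}

static void sim_vif_tx_unlock(struct sim_hw *hw, int reason)
{
        hw->vif.tx_paused &= ~BIT(reason);
        if (hw->tx_paused || hw->vif.tx_paused)
                return;
        hw->vif.queue_stopped = false;
}

int main(void)
{
        struct sim_hw hw = { .vif = { .vdev_id = 0 } };

        sim_tx_lock(&hw, SIM_TX_PAUSE_Q_FULL);  /* htt tx queue filled up */
        sim_vif_tx_lock(&hw, 1);                /* target paused this vdev */
        sim_tx_unlock(&hw, SIM_TX_PAUSE_Q_FULL);
        printf("stopped after hw unlock:  %d\n", hw.vif.queue_stopped); /* 1 */
        sim_vif_tx_unlock(&hw, 1);
        printf("stopped after vif unlock: %d\n", hw.vif.queue_stopped); /* 0 */
        return 0;
}

Built with any C compiler, the sketch shows the queue still stopped after the hw-level unlock and running only after the per-vif reason is also cleared, which is exactly the behaviour the two-mask design is after.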

drivers/net/wireless/ath/ath10k/core.h

@@ -314,6 +314,7 @@ struct ath10k_vif {
         enum ath10k_beacon_state beacon_state;
         void *beacon_buf;
         dma_addr_t beacon_paddr;
+        unsigned long tx_paused; /* arbitrary values defined by target */
 
         struct ath10k *ar;
         struct ieee80211_vif *vif;
@@ -519,6 +520,11 @@ static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
         return "unknown";
 }
 
+enum ath10k_tx_pause_reason {
+        ATH10K_TX_PAUSE_Q_FULL,
+        ATH10K_TX_PAUSE_MAX,
+};
+
 struct ath10k {
         struct ath_common ath_common;
         struct ieee80211_hw *hw;
@@ -680,6 +686,8 @@ struct ath10k {
 
         struct dfs_pattern_detector *dfs_detector;
 
+        unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
 #ifdef CONFIG_ATH10K_DEBUGFS
         struct ath10k_debug debug;
 #endif

drivers/net/wireless/ath/ath10k/htt_tx.c

@@ -26,7 +26,7 @@ void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
 {
         htt->num_pending_tx--;
         if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
-                ieee80211_wake_queues(htt->ar->hw);
+                ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 }
 
 static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
@@ -49,7 +49,7 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
 
         htt->num_pending_tx++;
         if (htt->num_pending_tx == htt->max_num_pending_tx)
-                ieee80211_stop_queues(htt->ar->hw);
+                ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 
 exit:
         spin_unlock_bh(&htt->tx_lock);

drivers/net/wireless/ath/ath10k/mac.c

@@ -2853,6 +2853,72 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
 /* TX handlers */
 /***************/
 
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
+{
+        lockdep_assert_held(&ar->htt.tx_lock);
+
+        WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+        ar->tx_paused |= BIT(reason);
+        ieee80211_stop_queues(ar->hw);
+}
+
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+                                      struct ieee80211_vif *vif)
+{
+        struct ath10k *ar = data;
+        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+
+        if (arvif->tx_paused)
+                return;
+
+        ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
+{
+        lockdep_assert_held(&ar->htt.tx_lock);
+
+        WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+        ar->tx_paused &= ~BIT(reason);
+
+        if (ar->tx_paused)
+                return;
+
+        ieee80211_iterate_active_interfaces_atomic(ar->hw,
+                                                   IEEE80211_IFACE_ITER_RESUME_ALL,
+                                                   ath10k_mac_tx_unlock_iter,
+                                                   ar);
+}
+
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
+{
+        struct ath10k *ar = arvif->ar;
+
+        lockdep_assert_held(&ar->htt.tx_lock);
+
+        WARN_ON(reason >= BITS_PER_LONG);
+        arvif->tx_paused |= BIT(reason);
+        ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+        struct ath10k *ar = arvif->ar;
+
+        lockdep_assert_held(&ar->htt.tx_lock);
+
+        WARN_ON(reason >= BITS_PER_LONG);
+        arvif->tx_paused &= ~BIT(reason);
+
+        if (ar->tx_paused)
+                return;
+
+        if (arvif->tx_paused)
+                return;
+
+        ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
 static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
 {
         if (ieee80211_is_mgmt(hdr->frame_control))
@@ -3443,6 +3509,7 @@ void ath10k_halt(struct ath10k *ar)
         ath10k_monitor_stop(ar);
 
         ar->monitor_started = false;
+        ar->tx_paused = 0;
 
         ath10k_scan_finish(ar);
         ath10k_peer_cleanup_all(ar);
@@ -3862,6 +3929,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
         int ret = 0;
         u32 value;
         int bit;
+        int i;
         u32 vdev_param;
 
         vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
@@ -3919,6 +3987,15 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
                 break;
         }
 
+        /* Using vdev_id as queue number will make it very easy to do per-vif
+         * tx queue locking. This shouldn't wrap due to interface combinations
+         * but do a modulo for correctness sake and prevent using offchannel tx
+         * queues for regular vif tx.
+         */
+        vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+        for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+                vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+
         /* Some firmware revisions don't wait for beacon tx completion before
          * sending another SWBA event. This could lead to hardware using old
          * (freed) beacon data in some cases, e.g. tx credit starvation
@@ -6547,7 +6624,8 @@ int ath10k_mac_register(struct ath10k *ar)
                     IEEE80211_HW_SW_CRYPTO_CONTROL |
                     IEEE80211_HW_CONNECTION_MONITOR |
                     IEEE80211_HW_WANT_MONITOR_VIF |
-                    IEEE80211_HW_CHANCTX_STA_CSA;
+                    IEEE80211_HW_CHANCTX_STA_CSA |
+                    IEEE80211_HW_QUEUE_CONTROL;
 
         ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
 
@@ -6603,7 +6681,13 @@ int ath10k_mac_register(struct ath10k *ar)
          * on LL hardware queues are managed entirely by the FW
          * so we only advertise to mac we can do the queues thing
          */
-        ar->hw->queues = 4;
+        ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+        /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
+         * something that vdev_ids can't reach so that we don't stop the queue
+         * accidentally.
+         */
+        ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
 
         switch (ar->wmi.op_version) {
         case ATH10K_FW_WMI_OP_VERSION_MAIN:

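A note on the queue mapping added in mac.c above: because every vif's hw_queue[] and cab_queue are computed modulo (IEEE80211_MAX_QUEUES - 1), a regular vif can never be assigned the last queue index, which the driver reserves as offchannel_tx_hw_queue. Below is a tiny standalone C check of that property; MAX_QUEUES is a stand-in constant, assumed here to be 16 (mac80211's IEEE80211_MAX_QUEUES at the time of this patch), and the loop bound of 20 vdev ids is arbitrary.

/* Illustration only: verify the vdev_id -> hw queue mapping never hits the
 * queue reserved for offchannel tx.
 */
#include <stdio.h>

#define MAX_QUEUES 16   /* stands in for IEEE80211_MAX_QUEUES */

int main(void)
{
        int offchan_queue = MAX_QUEUES - 1;     /* ar->hw->offchannel_tx_hw_queue */
        int vdev_id;

        for (vdev_id = 0; vdev_id < 20; vdev_id++) {
                int hw_queue = vdev_id % (MAX_QUEUES - 1);      /* vif->hw_queue[ac] */
                printf("vdev %2d -> hw queue %2d%s\n", vdev_id, hw_queue,
                       hw_queue == offchan_queue ? " (collision!)" : "");
        }
        return 0;
}

The modulo keeps hw_queue in the range 0..MAX_QUEUES-2, so the "(collision!)" branch never fires: stopping a per-vif queue can never accidentally stop the offchannel queue, and vice versa.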
drivers/net/wireless/ath/ath10k/mac.h

@@ -63,6 +63,11 @@ u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
                              u32 bitrate);
 
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
+
 static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
 {
         return (struct ath10k_vif *)vif->drv_priv;