mirror of https://gitee.com/openkylin/linux.git
ath9k: clean up tx buffer setup
Merge ath_tx_send_normal and ath_tx_send_ht_normal. Move the paprd state
initialization and sequence number assignment to reduce the number of
redundant checks. This not only simplifies buffer allocation error handling,
but also removes a small inconsistency in the buffer HT flag. This flag
should only be set if the frame is also a QoS data frame.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
parent 61117f01e7
commit 82b873afe8
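For orientation, this is roughly what the merged helper reads like after the patch. It is pieced together from the hunks below rather than copied verbatim from the tree; the key point is that non-aggregation callers now pass tid == NULL and simply skip the sequence-number update:

/* Sketch of the merged ath_tx_send_normal() after this patch,
 * assembled from the hunks below (not a verbatim copy of xmit.c). */
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
{
	struct ath_buf *bf;

	bf = list_first_entry(bf_head, struct ath_buf, list);
	bf->bf_state.bf_type &= ~BUF_AMPDU;

	/* update starting sequence number for subsequent ADDBA request;
	 * non-QoS / non-aggregation callers pass tid == NULL */
	if (tid)
		INCR(tid->seq_start, IEEE80211_SEQ_MAX);

	bf->bf_nframes = 1;
	bf->bf_lastbf = bf;
	ath_buf_set_rate(sc, bf);
	ath_tx_txqaddbuf(sc, txq, bf_head);
	TX_STAT_INC(txq->axq_qnum, queued);
}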
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -48,7 +48,7 @@ static u16 bits_per_symbol[][2] = {
 
 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
 
-static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -160,7 +160,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 			ath_tx_update_baw(sc, tid, bf->bf_seqno);
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
 		} else {
-			ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
+			ath_tx_send_normal(sc, txq, tid, &bf_head);
 		}
 	}
 
@@ -1322,7 +1322,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
 }
 
-static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
			       struct ath_atx_tid *tid,
			       struct list_head *bf_head)
 {
@@ -1332,6 +1332,7 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
 	bf->bf_state.bf_type &= ~BUF_AMPDU;
 
 	/* update starting sequence number for subsequent ADDBA request */
-	INCR(tid->seq_start, IEEE80211_SEQ_MAX);
+	if (tid)
+		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
 
 	bf->bf_nframes = 1;
@@ -1341,20 +1342,6 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
 	TX_STAT_INC(txq->axq_qnum, queued);
 }
 
-static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
-			       struct list_head *bf_head)
-{
-	struct ath_buf *bf;
-
-	bf = list_first_entry(bf_head, struct ath_buf, list);
-
-	bf->bf_lastbf = bf;
-	bf->bf_nframes = 1;
-	ath_buf_set_rate(sc, bf);
-	ath_tx_txqaddbuf(sc, txq, bf_head);
-	TX_STAT_INC(txq->axq_qnum, queued);
-}
-
 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr;
@@ -1411,7 +1398,7 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
 	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
 }
 
-static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
+static int setup_tx_flags(struct sk_buff *skb)
 {
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	int flags = 0;
@@ -1422,7 +1409,7 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
 	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
 		flags |= ATH9K_TXDESC_NOACK;
 
-	if (use_ldpc)
+	if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
 		flags |= ATH9K_TXDESC_LDPC;
 
 	return flags;
@@ -1567,18 +1554,24 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 		ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
 }
 
-static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
-			       struct sk_buff *skb,
-			       struct ath_tx_control *txctl)
+static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
+					   struct sk_buff *skb)
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ath_buf *bf;
 	int hdrlen;
 	__le16 fc;
 	int padpos, padsize;
-	bool use_ldpc = false;
+
+	bf = ath_tx_get_buffer(sc);
+	if (!bf) {
+		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
+		return NULL;
+	}
 
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
 	fc = hdr->frame_control;
@@ -1594,16 +1587,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 		bf->bf_frmlen -= padsize;
 	}
 
-	if (!txctl->paprd && conf_is_ht(&hw->conf)) {
+	if (ieee80211_is_data_qos(fc) && conf_is_ht(&hw->conf)) {
 		bf->bf_state.bf_type |= BUF_HT;
-		if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
-			use_ldpc = true;
+		if (sc->sc_flags & SC_OP_TXAGGR)
+			assign_aggr_tid_seqno(skb, bf);
 	}
 
-	bf->bf_state.bfs_paprd = txctl->paprd;
-	if (txctl->paprd)
-		bf->bf_state.bfs_paprd_timestamp = jiffies;
-	bf->bf_flags = setup_tx_flags(skb, use_ldpc);
+	bf->bf_flags = setup_tx_flags(skb);
 
 	bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
 	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
@@ -1613,10 +1603,6 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 		bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
 	}
 
-	if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
-	    (sc->sc_flags & SC_OP_TXAGGR))
-		assign_aggr_tid_seqno(skb, bf);
-
 	bf->bf_mpdu = skb;
 
 	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
@@ -1626,12 +1612,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
 		bf->bf_buf_addr = 0;
 		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
 			  "dma_mapping_error() on TX\n");
-		return -ENOMEM;
+		ath_tx_return_buffer(sc, bf);
+		return NULL;
 	}
 
 	bf->bf_tx_aborted = false;
 
-	return 0;
+	return bf;
 }
 
 /* FIXME: tx power */
@@ -1679,11 +1666,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 		an = (struct ath_node *)tx_info->control.sta->drv_priv;
 		tid = ATH_AN_2_TID(an, bf->bf_tidno);
 
-		if (!ieee80211_is_data_qos(fc)) {
-			ath_tx_send_normal(sc, txctl->txq, &bf_head);
-			goto tx_done;
-		}
-
 		WARN_ON(tid->ac->txq != txctl->txq);
 		if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
 			/*
@@ -1696,15 +1678,18 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 			 * Send this frame as regular when ADDBA
 			 * exchange is neither complete nor pending.
 			 */
-			ath_tx_send_ht_normal(sc, txctl->txq,
-					      tid, &bf_head);
+			ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
 		}
 	} else {
 		bf->bf_state.bfs_ftype = txctl->frame_type;
-		ath_tx_send_normal(sc, txctl->txq, &bf_head);
+		bf->bf_state.bfs_paprd = txctl->paprd;
+
+		if (txctl->paprd)
+			bf->bf_state.bfs_paprd_timestamp = jiffies;
+
+		ath_tx_send_normal(sc, txctl->txq, NULL, &bf_head);
 	}
 
-tx_done:
 	spin_unlock_bh(&txctl->txq->axq_lock);
 }
 
@@ -1714,39 +1699,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 {
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_txq *txq = txctl->txq;
 	struct ath_buf *bf;
-	int q, r;
+	int q;
 
-	bf = ath_tx_get_buffer(sc);
-	if (!bf) {
-		ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
-		return -1;
-	}
+	bf = ath_tx_setup_buffer(hw, skb);
+	if (unlikely(!bf))
+		return -ENOMEM;
 
 	q = skb_get_queue_mapping(skb);
-	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
-	if (unlikely(r)) {
-		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
-
-		/* upon ath_tx_processq() this TX queue will be resumed, we
-		 * guarantee this will happen by knowing beforehand that
-		 * we will at least have to run TX completionon one buffer
-		 * on the queue */
-		spin_lock_bh(&txq->axq_lock);
-		if (txq == sc->tx.txq_map[q] && !txq->stopped &&
-		    txq->axq_depth > 1) {
-			ath_mac80211_stop_queue(sc, q);
-			txq->stopped = 1;
-		}
-		spin_unlock_bh(&txq->axq_lock);
-
-		ath_tx_return_buffer(sc, bf);
-
-		return r;
-	}
-
 	spin_lock_bh(&txq->axq_lock);
 	if (txq == sc->tx.txq_map[q] &&
 	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
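The simpler error handling the log message refers to shows up in the last two hunks: ath_tx_setup_buffer() now allocates the ath_buf itself, logs and releases it on any failure (buffer exhaustion or DMA mapping error), and returns NULL instead of an error code, so ath_tx_start() loses its long unwind path. A condensed view of the resulting caller, pieced together from the hunks above with the queue-throttling details omitted:

	/* ath_tx_start() after this patch, condensed from the hunks above;
	 * allocation, setup and error cleanup all live in ath_tx_setup_buffer(). */
	bf = ath_tx_setup_buffer(hw, skb);
	if (unlikely(!bf))
		return -ENOMEM;

	q = skb_get_queue_mapping(skb);
	/* ... the existing queue-throttling check and the call into
	 * ath_tx_start_dma() follow here, unchanged by this hunk ... */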