Merge tag 'mt76-for-kvalo-2019-06-27' of https://github.com/nbd168/wireless

mt76 patches for 5.3

* use NAPI polling for tx cleanup on mt7603/mt7615
* various fixes for mt7615
* unify some code between mt7603 and mt7615
* fix locking issues on mt76x02
* add support for toggling edcca on mt7603
* fix reading target tx power with ext PA on mt7603/mt7615
* fix initializing channel maximum power
* fix rate control / tx status reporting issues on mt76x02/mt7603
* add support for eeprom calibration data from mtd on mt7615
* support configuring tx power on mt7615
* fix external PA support on mt76x0
* per-chain signal reporting on mt7615
* rx/tx buffer fixes for USB devices
Kalle Valo committed 2019-06-30 12:29:30 +03:00
commit 9829a0bd66
47 changed files with 1417 additions and 1006 deletions


@ -588,6 +588,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
mt76_dma_tx_cleanup(dev, i, true);


@ -766,10 +766,21 @@ int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*dbm = DIV_ROUND_UP(dev->txpower_cur, 2);
/* convert from per-chain power to combined
* output on 2x2 devices
* output power
*/
if (n_chains > 1)
switch (n_chains) {
case 4:
*dbm += 6;
break;
case 3:
*dbm += 4;
break;
case 2:
*dbm += 3;
break;
default:
break;
}
return 0;
}
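
Note on the combined-power conversion above: the per-chain offsets follow 10 * log10(n_chains) dB, i.e. roughly 3.0 dB for two chains, 4.8 dB for three and 6.0 dB for four, which the switch statement rounds to 3, 4 and 6 dB. For example, a stored per-chain value of 34 (half-dB units) on a 2x2 device reports DIV_ROUND_UP(34, 2) + 3 = 20 dBm combined. mt7615_init_txpower() later in this series applies the same 3/4/6 dB offsets when deriving channel power limits.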
@ -820,3 +831,50 @@ mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
return 0;
}
EXPORT_SYMBOL_GPL(mt76_set_tim);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
u8 *hdr, *pn = status->iv;
__skb_push(skb, 8);
memmove(skb->data, skb->data + 8, hdr_len);
hdr = skb->data + hdr_len;
hdr[0] = pn[5];
hdr[1] = pn[4];
hdr[2] = 0;
hdr[3] = 0x20 | (key_id << 6);
hdr[4] = pn[3];
hdr[5] = pn[2];
hdr[6] = pn[1];
hdr[7] = pn[0];
status->flag &= ~RX_FLAG_IV_STRIPPED;
}
EXPORT_SYMBOL_GPL(mt76_insert_ccmp_hdr);
int mt76_get_rate(struct mt76_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck)
{
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
if (sband == &dev->sband_5g.sband)
return 0;
idx &= ~BIT(2); /* short preamble */
} else if (sband == &dev->sband_2g.sband) {
offset = 4;
}
for (i = offset; i < len; i++) {
if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
return i;
}
return 0;
}
EXPORT_SYMBOL_GPL(mt76_get_rate);


@ -258,10 +258,11 @@ struct mt76_rx_tid {
#define MT_TX_CB_TXS_DONE BIT(1)
#define MT_TX_CB_TXS_FAILED BIT(2)
#define MT_PACKET_ID_MASK GENMASK(7, 0)
#define MT_PACKET_ID_MASK GENMASK(6, 0)
#define MT_PACKET_ID_NO_ACK 0
#define MT_PACKET_ID_NO_SKB 1
#define MT_PACKET_ID_FIRST 2
#define MT_PACKET_ID_HAS_RATE BIT(7)
#define MT_TX_STATUS_SKB_TIMEOUT HZ
@ -381,7 +382,8 @@ enum mt76u_out_ep {
__MT_EP_OUT_MAX,
};
#define MT_SG_MAX_SIZE 8
#define MT_TX_SG_MAX_SIZE 8
#define MT_RX_SG_MAX_SIZE 1
#define MT_NUM_TX_ENTRIES 256
#define MT_NUM_RX_ENTRIES 128
#define MCU_RESP_URB_SIZE 1024
@ -393,9 +395,7 @@ struct mt76_usb {
struct delayed_work stat_work;
u8 out_ep[__MT_EP_OUT_MAX];
u16 out_max_packet;
u8 in_ep[__MT_EP_IN_MAX];
u16 in_max_packet;
bool sg_en;
struct mt76u_mcu {
@ -452,6 +452,7 @@ struct mt76_dev {
int tx_dma_idx[4];
struct tasklet_struct tx_tasklet;
struct napi_struct tx_napi;
struct delayed_work mac_work;
wait_queue_head_t tx_wait;
@ -483,6 +484,8 @@ struct mt76_dev {
int txpower_conf;
int txpower_cur;
enum nl80211_dfs_regions region;
u32 debugfs_reg;
struct led_classdev led_cdev;
@ -688,6 +691,14 @@ static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
skb->data[len + 1] = 0;
}
static inline bool mt76_is_skb_pktid(u8 pktid)
{
if (pktid & MT_PACKET_ID_HAS_RATE)
return false;
return pktid >= MT_PACKET_ID_FIRST;
}
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
@ -749,6 +760,10 @@ void mt76_csa_check(struct mt76_dev *dev);
void mt76_csa_finish(struct mt76_dev *dev);
int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
int mt76_get_rate(struct mt76_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck);
/* internal */
void mt76_tx_free(struct mt76_dev *dev);


@ -35,7 +35,7 @@ irqreturn_t mt7603_irq_handler(int irq, void *dev_instance)
if (intr & MT_INT_TX_DONE_ALL) {
mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL);
tasklet_schedule(&dev->mt76.tx_tasklet);
napi_schedule(&dev->mt76.tx_napi);
}
if (intr & MT_INT_RX_DONE(0)) {


@ -40,6 +40,35 @@ mt7603_radio_read(struct seq_file *s, void *data)
return 0;
}
static int
mt7603_edcca_set(void *data, u64 val)
{
struct mt7603_dev *dev = data;
mutex_lock(&dev->mt76.mutex);
dev->ed_monitor_enabled = !!val;
dev->ed_monitor = dev->ed_monitor_enabled &&
dev->mt76.region == NL80211_DFS_ETSI;
mt7603_init_edcca(dev);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
static int
mt7603_edcca_get(void *data, u64 *val)
{
struct mt7603_dev *dev = data;
*val = dev->ed_monitor_enabled;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt7603_edcca_get,
mt7603_edcca_set, "%lld\n");
void mt7603_init_debugfs(struct mt7603_dev *dev)
{
struct dentry *dir;
@ -48,6 +77,7 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
if (!dir)
return;
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test);
debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir,
mt7603_reset_read);


@ -139,15 +139,30 @@ static void
mt7603_tx_tasklet(unsigned long data)
{
struct mt7603_dev *dev = (struct mt7603_dev *)data;
mt76_txq_schedule_all(&dev->mt76);
}
static int mt7603_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7603_dev *dev;
int i;
dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
dev->tx_dma_check = 0;
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
mt76_txq_schedule_all(&dev->mt76);
if (napi_complete_done(napi, 0))
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;
}
int mt7603_dma_init(struct mt7603_dev *dev)
@ -216,7 +231,15 @@ int mt7603_dma_init(struct mt7603_dev *dev)
return ret;
mt76_wr(dev, MT_DELAY_INT_CFG, 0);
return mt76_init_queues(dev);
ret = mt76_init_queues(dev);
if (ret)
return ret;
netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
mt7603_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
return 0;
}
void mt7603_dma_cleanup(struct mt7603_dev *dev)
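
The mt7603 and mt7615 tx-path changes in this series (and the mt76x02 ones further down) all follow the standard NAPI contract for tx reclaim: the interrupt handler masks the tx-done interrupt and schedules the tx NAPI instance, and the poll callback reaps the queues, refills them and re-enables the interrupt only once napi_complete_done() confirms no further poll is pending. A minimal sketch of that pattern; the my_* names are placeholders standing in for the driver helpers seen in the hunks above, not real symbols:

/* Sketch only: my_dev, my_poll_tx, MY_INT_TX_DONE etc. are hypothetical. */
static int my_poll_tx(struct napi_struct *napi, int budget)
{
	struct my_dev *dev = container_of(napi, struct my_dev, tx_napi);
	int i;

	/* reap completed tx descriptors on every tx queue */
	for (i = 0; i < MY_NUM_TXQ; i++)
		my_queue_tx_cleanup(dev, i, false);

	/* push pending frames into the freed-up slots */
	my_txq_schedule_all(dev);

	/* tx work is not budgeted: report 0 done and re-arm the
	 * interrupt only if NAPI was not rescheduled meanwhile
	 */
	if (napi_complete_done(napi, 0))
		my_irq_enable(dev, MY_INT_TX_DONE);

	return 0;
}

The poll function is registered with netif_tx_napi_add() and enabled with napi_enable() during DMA init, which is exactly what the mt7603_dma_init() and mt7615_dma_init() hunks do for mt76.tx_napi.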


@ -69,6 +69,8 @@ enum mt7603_eeprom_field {
MT_EE_CP_FT_VERSION = 0x0f0,
MT_EE_TX_POWER_TSSI_OFF = 0x0f2,
MT_EE_XTAL_FREQ_OFFSET = 0x0f4,
MT_EE_XTAL_TRIM_2_COMP = 0x0f5,
MT_EE_XTAL_TRIM_3_COMP = 0x0f6,


@ -227,11 +227,19 @@ mt7603_mac_init(struct mt7603_dev *dev)
mt76_rmw_field(dev, MT_LPON_BTEIR, MT_LPON_BTEIR_MBSS_MODE, 2);
mt76_rmw_field(dev, MT_WF_RMACDR, MT_WF_RMACDR_MBSSID_MASK, 2);
mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7));
mt76_wr(dev, MT_AGG_ARUCR,
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 2) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), 2) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), 2) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), 1));
mt76_wr(dev, MT_AGG_ARDCR,
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1),
max_t(int, 0, MT7603_RATE_RETRY - 2)) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7603_RATE_RETRY - 1) |
FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7603_RATE_RETRY - 1) |
@ -437,7 +445,9 @@ mt7603_regd_notifier(struct wiphy *wiphy,
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct mt7603_dev *dev = hw->priv;
dev->ed_monitor = request->dfs_region == NL80211_DFS_ETSI;
dev->mt76.region = request->dfs_region;
dev->ed_monitor = dev->ed_monitor_enabled &&
dev->mt76.region == NL80211_DFS_ETSI;
}
static int
@ -463,9 +473,13 @@ mt7603_init_txpower(struct mt7603_dev *dev,
u8 *eeprom = (u8 *)dev->mt76.eeprom.data;
int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
bool ext_pa = eeprom[MT_EE_NIC_CONF_0 + 1] & BIT(1);
int max_offset, cur_offset;
int i;
if (ext_pa && is_mt7603(dev))
target_power = eeprom[MT_EE_TX_POWER_TSSI_OFF] & ~BIT(7);
if (target_power & BIT(6))
target_power = -(target_power & GENMASK(5, 0));
@ -488,7 +502,7 @@ mt7603_init_txpower(struct mt7603_dev *dev,
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
chan->max_power = target_power;
chan->max_power = min_t(int, chan->max_reg_power, target_power);
chan->orig_mpwr = target_power;
}
}


@ -370,31 +370,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}
static int
mt7603_get_rate(struct mt7603_dev *dev, struct ieee80211_supported_band *sband,
int idx, bool cck)
{
int offset = 0;
int len = sband->n_bitrates;
int i;
if (cck) {
if (sband == &dev->mt76.sband_5g.sband)
return 0;
idx &= ~BIT(2); /* short preamble */
} else if (sband == &dev->mt76.sband_2g.sband) {
offset = 4;
}
for (i = offset; i < len; i++) {
if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
return i;
}
return 0;
}
static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
@ -418,30 +393,6 @@ mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
return &sta->vif->sta.wcid;
}
static void
mt7603_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
u8 *pn = status->iv;
u8 *hdr;
__skb_push(skb, 8);
memmove(skb->data, skb->data + 8, hdr_len);
hdr = skb->data + hdr_len;
hdr[0] = pn[5];
hdr[1] = pn[4];
hdr[2] = 0;
hdr[3] = 0x20 | (key_id << 6);
hdr[4] = pn[3];
hdr[5] = pn[2];
hdr[6] = pn[1];
hdr[7] = pn[0];
status->flag &= ~RX_FLAG_IV_STRIPPED;
}
int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
@ -532,7 +483,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
i = mt7603_get_rate(dev, sband, i, cck);
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
case MT_PHY_TYPE_HT_GF:
case MT_PHY_TYPE_HT:
@ -580,7 +531,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
if (insert_ccmp_hdr) {
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
mt7603_insert_ccmp_hdr(skb, key_id);
mt76_insert_ccmp_hdr(skb, key_id);
}
hdr = (struct ieee80211_hdr *)skb->data;
@ -640,6 +591,7 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates)
{
struct ieee80211_tx_rate *ref;
int wcid = sta->wcid.idx;
u32 addr = mt7603_wtbl2_addr(wcid);
bool stbc = false;
@ -648,7 +600,8 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
u16 val[4];
u16 probe_val;
u32 w9 = mt76_rr(dev, addr + 9 * 4);
int i;
bool rateset;
int i, k;
if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
return;
@ -656,6 +609,41 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
for (i = n_rates; i < 4; i++)
rates[i] = rates[n_rates - 1];
rateset = !(sta->rate_set_tsf & BIT(0));
memcpy(sta->rateset[rateset].rates, rates,
sizeof(sta->rateset[rateset].rates));
if (probe_rate) {
sta->rateset[rateset].probe_rate = *probe_rate;
ref = &sta->rateset[rateset].probe_rate;
} else {
sta->rateset[rateset].probe_rate.idx = -1;
ref = &sta->rateset[rateset].rates[0];
}
rates = sta->rateset[rateset].rates;
for (i = 0; i < ARRAY_SIZE(sta->rateset[rateset].rates); i++) {
/*
* We don't support switching between short and long GI
* within the rate set. For accurate tx status reporting, we
* need to make sure that flags match.
* For improved performance, avoid duplicate entries by
* decrementing the MCS index if necessary
*/
if ((ref->flags ^ rates[i].flags) & IEEE80211_TX_RC_SHORT_GI)
rates[i].flags ^= IEEE80211_TX_RC_SHORT_GI;
for (k = 0; k < i; k++) {
if (rates[i].idx != rates[k].idx)
continue;
if ((rates[i].flags ^ rates[k].flags) &
IEEE80211_TX_RC_40_MHZ_WIDTH)
continue;
rates[i].idx--;
}
}
w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
MT_WTBL2_W9_SHORT_GI_80;
@ -699,19 +687,22 @@ void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
mt76_wr(dev, MT_WTBL_RIUCR1,
FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0]));
FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[1]));
mt76_wr(dev, MT_WTBL_RIUCR2,
FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[1] >> 8) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[2]) |
FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));
mt76_wr(dev, MT_WTBL_RIUCR3,
FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[3]) |
FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));
mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */
sta->rate_set_tsf = (mt76_rr(dev, MT_LPON_UTTR0) & ~BIT(0)) | rateset;
mt76_wr(dev, MT_WTBL_UPDATE,
FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
MT_WTBL_UPDATE_RATE_UPDATE |
@ -938,9 +929,9 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
spin_lock_bh(&dev->mt76.lock);
msta->rate_probe = true;
mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
msta->rates);
msta->rate_probe = true;
spin_unlock_bh(&dev->mt76.lock);
}
@ -955,10 +946,12 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
struct ieee80211_tx_info *info, __le32 *txs_data)
{
struct ieee80211_supported_band *sband;
int final_idx = 0;
struct mt7603_rate_set *rs;
int first_idx = 0, last_idx;
u32 rate_set_tsf;
u32 final_rate;
u32 final_rate_flags;
bool final_mpdu;
bool rs_idx;
bool ack_timeout;
bool fixed_rate;
bool probe;
@ -966,7 +959,6 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
bool cck = false;
int count;
u32 txs;
u8 pid;
int idx;
int i;
@ -974,10 +966,9 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
txs = le32_to_cpu(txs_data[4]);
final_mpdu = txs & MT_TXS4_ACKED_MPDU;
ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
pid = FIELD_GET(MT_TXS4_PID, txs);
count = FIELD_GET(MT_TXS4_TX_COUNT, txs);
last_idx = FIELD_GET(MT_TXS4_LAST_TX_RATE, txs);
txs = le32_to_cpu(txs_data[0]);
final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
@ -999,38 +990,57 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;
first_idx = max_t(int, 0, last_idx - (count + 1) / MT7603_RATE_RETRY);
if (fixed_rate && !probe) {
info->status.rates[0].count = count;
i = 0;
goto out;
}
for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
int cur_count = min_t(int, count, 2 * MT7603_RATE_RETRY);
rate_set_tsf = READ_ONCE(sta->rate_set_tsf);
rs_idx = !((u32)(FIELD_GET(MT_TXS1_F0_TIMESTAMP, le32_to_cpu(txs_data[1])) -
rate_set_tsf) < 1000000);
rs_idx ^= rate_set_tsf & BIT(0);
rs = &sta->rateset[rs_idx];
if (!i && probe) {
cur_count = 1;
} else {
info->status.rates[i] = sta->rates[idx];
idx++;
if (!first_idx && rs->probe_rate.idx >= 0) {
info->status.rates[0] = rs->probe_rate;
spin_lock_bh(&dev->mt76.lock);
if (sta->rate_probe) {
mt7603_wtbl_set_rates(dev, sta, NULL,
sta->rates);
sta->rate_probe = false;
}
spin_unlock_bh(&dev->mt76.lock);
} else
info->status.rates[0] = rs->rates[first_idx / 2];
info->status.rates[0].count = 0;
if (i && info->status.rates[i].idx < 0) {
info->status.rates[i - 1].count += count;
break;
}
for (i = 0, idx = first_idx; count && idx <= last_idx; idx++) {
struct ieee80211_tx_rate *cur_rate;
int cur_count;
if (!count) {
info->status.rates[i].idx = -1;
break;
}
info->status.rates[i].count = cur_count;
final_idx = i;
cur_rate = &rs->rates[idx / 2];
cur_count = min_t(int, MT7603_RATE_RETRY, count);
count -= cur_count;
if (idx && (cur_rate->idx != info->status.rates[i].idx ||
cur_rate->flags != info->status.rates[i].flags)) {
i++;
if (i == ARRAY_SIZE(info->status.rates))
break;
info->status.rates[i] = *cur_rate;
info->status.rates[i].count = 0;
}
info->status.rates[i].count += cur_count;
}
out:
final_rate_flags = info->status.rates[final_idx].flags;
final_rate_flags = info->status.rates[i].flags;
switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
case MT_PHY_TYPE_CCK:
@ -1042,7 +1052,8 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
else
sband = &dev->mt76.sband_2g.sband;
final_rate &= GENMASK(5, 0);
final_rate = mt7603_get_rate(dev, sband, final_rate, cck);
final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
cck);
final_rate_flags = 0;
break;
case MT_PHY_TYPE_HT_GF:
@ -1056,8 +1067,8 @@ mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
return false;
}
info->status.rates[final_idx].idx = final_rate;
info->status.rates[final_idx].flags = final_rate_flags;
info->status.rates[i].idx = final_rate;
info->status.rates[i].flags = final_rate_flags;
return true;
}
@ -1078,16 +1089,6 @@ mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
if (skb) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
spin_lock_bh(&dev->mt76.lock);
if (sta->rate_probe) {
mt7603_wtbl_set_rates(dev, sta, NULL,
sta->rates);
sta->rate_probe = false;
}
spin_unlock_bh(&dev->mt76.lock);
}
if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
ieee80211_tx_info_clear_status(info);
info->status.rates[0].idx = -1;
@ -1282,6 +1283,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
napi_disable(&dev->mt76.tx_napi);
mutex_lock(&dev->mt76.mutex);
@ -1326,7 +1328,8 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->mt76.tx_tasklet);
tasklet_schedule(&dev->mt76.tx_tasklet);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, -1, beacon_int);


@ -103,8 +103,7 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_unlock(&dev->mt76.mutex);
}
static void
mt7603_init_edcca(struct mt7603_dev *dev)
void mt7603_init_edcca(struct mt7603_dev *dev)
{
/* Set lower signal level to -65dBm */
mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23);
@ -207,8 +206,11 @@ mt7603_config(struct ieee80211_hw *hw, u32 changed)
int ret = 0;
if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
IEEE80211_CONF_CHANGE_POWER))
IEEE80211_CONF_CHANGE_POWER)) {
ieee80211_stop_queues(hw);
ret = mt7603_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);


@ -346,7 +346,7 @@ int mt7603_mcu_set_eeprom(struct mt7603_dev *dev)
};
struct req_data {
u16 addr;
__le16 addr;
u8 val;
u8 pad;
} __packed;


@ -51,6 +51,11 @@ enum mt7603_bw {
MT_BW_80,
};
struct mt7603_rate_set {
struct ieee80211_tx_rate probe_rate;
struct ieee80211_tx_rate rates[4];
};
struct mt7603_sta {
struct mt76_wcid wcid; /* must be first */
@ -58,7 +63,11 @@ struct mt7603_sta {
struct sk_buff_head psq;
struct ieee80211_tx_rate rates[8];
struct ieee80211_tx_rate rates[4];
struct mt7603_rate_set rateset[2];
u32 rate_set_tsf;
u8 rate_count;
u8 n_rates;
@ -117,8 +126,9 @@ struct mt7603_dev {
u8 mac_work_count;
u8 mcu_running;
u8 ed_monitor;
u8 ed_monitor_enabled;
u8 ed_monitor;
s8 ed_trigger;
u8 ed_strict_mode;
u8 ed_strong_signal;
@ -241,4 +251,5 @@ void mt7603_update_channel(struct mt76_dev *mdev);
void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
void mt7603_cca_stats_reset(struct mt7603_dev *dev);
void mt7603_init_edcca(struct mt7603_dev *dev);
#endif


@ -480,6 +480,12 @@ enum {
#define MT_LPON_BASE 0x24000
#define MT_LPON(n) (MT_LPON_BASE + (n))
#define MT_LPON_T0CR MT_LPON(0x010)
#define MT_LPON_T0CR_MODE GENMASK(1, 0)
#define MT_LPON_UTTR0 MT_LPON(0x018)
#define MT_LPON_UTTR1 MT_LPON(0x01c)
#define MT_LPON_BTEIR MT_LPON(0x020)
#define MT_LPON_BTEIR_MBSS_MODE GENMASK(31, 29)


@ -93,18 +93,33 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
static void mt7615_tx_tasklet(unsigned long data)
{
struct mt7615_dev *dev = (struct mt7615_dev *)data;
mt76_txq_schedule_all(&dev->mt76);
}
static int mt7615_poll_tx(struct napi_struct *napi, int budget)
{
static const u8 queue_map[] = {
MT_TXQ_MCU,
MT_TXQ_BE
};
struct mt7615_dev *dev;
int i;
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
mt76_queue_tx_cleanup(dev, queue_map[i], false);
mt76_txq_schedule_all(&dev->mt76);
if (napi_complete_done(napi, 0))
mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
for (i = 0; i < ARRAY_SIZE(queue_map); i++)
mt76_queue_tx_cleanup(dev, queue_map[i], false);
tasklet_schedule(&dev->mt76.tx_tasklet);
return 0;
}
int mt7615_dma_init(struct mt7615_dev *dev)
@ -178,6 +193,10 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret < 0)
return ret;
netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
mt7615_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
mt76_poll(dev, MT_WPDMA_GLO_CFG,
MT_WPDMA_GLO_CFG_TX_DMA_BUSY |
MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000);


@ -42,13 +42,13 @@ static int mt7615_efuse_read(struct mt7615_dev *dev, u32 base,
static int mt7615_efuse_init(struct mt7615_dev *dev)
{
u32 base = mt7615_reg_map(dev, MT_EFUSE_BASE);
int len = MT7615_EEPROM_SIZE;
int ret, i;
u32 val, base = mt7615_reg_map(dev, MT_EFUSE_BASE);
int i, len = MT7615_EEPROM_SIZE;
void *buf;
if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY)
return -EINVAL;
val = mt76_rr(dev, base + MT_EFUSE_BASE_CTRL);
if (val & MT_EFUSE_BASE_CTRL_EMPTY)
return 0;
dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL);
dev->mt76.otp.size = len;
@ -57,6 +57,8 @@ static int mt7615_efuse_init(struct mt7615_dev *dev)
buf = dev->mt76.otp.data;
for (i = 0; i + 16 <= len; i += 16) {
int ret;
ret = mt7615_efuse_read(dev, base, i, buf + i);
if (ret)
return ret;
@ -76,6 +78,82 @@ static int mt7615_eeprom_load(struct mt7615_dev *dev)
return mt7615_efuse_init(dev);
}
static int mt7615_check_eeprom(struct mt76_dev *dev)
{
u16 val = get_unaligned_le16(dev->eeprom.data);
switch (val) {
case 0x7615:
return 0;
default:
return -EINVAL;
}
}
static void mt7615_eeprom_parse_hw_cap(struct mt7615_dev *dev)
{
u8 val, *eeprom = dev->mt76.eeprom.data;
val = FIELD_GET(MT_EE_NIC_WIFI_CONF_BAND_SEL,
eeprom[MT_EE_WIFI_CONF]);
switch (val) {
case MT_EE_5GHZ:
dev->mt76.cap.has_5ghz = true;
break;
case MT_EE_2GHZ:
dev->mt76.cap.has_2ghz = true;
break;
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
break;
}
}
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx)
{
int index;
if (chain_idx > 3)
return -EINVAL;
/* TSSI disabled */
if (mt7615_ext_pa_enabled(dev, chan->band)) {
if (chan->band == NL80211_BAND_2GHZ)
return MT_EE_EXT_PA_2G_TARGET_POWER;
else
return MT_EE_EXT_PA_5G_TARGET_POWER;
}
/* TSSI enabled */
if (chan->band == NL80211_BAND_2GHZ) {
index = MT_EE_TX0_2G_TARGET_POWER + chain_idx * 6;
} else {
int group = mt7615_get_channel_group(chan->hw_value);
switch (chain_idx) {
case 1:
index = MT_EE_TX1_5G_G0_TARGET_POWER;
break;
case 2:
index = MT_EE_TX2_5G_G0_TARGET_POWER;
break;
case 3:
index = MT_EE_TX3_5G_G0_TARGET_POWER;
break;
case 0:
default:
index = MT_EE_TX0_5G_G0_TARGET_POWER;
break;
}
index += 5 * group;
}
return index;
}
int mt7615_eeprom_init(struct mt7615_dev *dev)
{
int ret;
@ -84,11 +162,12 @@ int mt7615_eeprom_init(struct mt7615_dev *dev)
if (ret < 0)
return ret;
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, MT7615_EEPROM_SIZE);
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
ret = mt7615_check_eeprom(&dev->mt76);
if (ret && dev->mt76.otp.data)
memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data,
MT7615_EEPROM_SIZE);
mt7615_eeprom_parse_hw_cap(dev);
memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);


@ -11,8 +11,69 @@ enum mt7615_eeprom_field {
MT_EE_VERSION = 0x002,
MT_EE_MAC_ADDR = 0x004,
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
MT_EE_WIFI_CONF = 0x03e,
MT_EE_TX0_2G_TARGET_POWER = 0x058,
MT_EE_TX0_5G_G0_TARGET_POWER = 0x070,
MT_EE_TX1_5G_G0_TARGET_POWER = 0x098,
MT_EE_EXT_PA_2G_TARGET_POWER = 0x0f2,
MT_EE_EXT_PA_5G_TARGET_POWER = 0x0f3,
MT_EE_TX2_5G_G0_TARGET_POWER = 0x142,
MT_EE_TX3_5G_G0_TARGET_POWER = 0x16a,
__MT_EE_MAX = 0x3bf
};
#define MT_EE_NIC_CONF_TSSI_2G BIT(5)
#define MT_EE_NIC_CONF_TSSI_5G BIT(6)
#define MT_EE_NIC_WIFI_CONF_BAND_SEL GENMASK(5, 4)
enum mt7615_eeprom_band {
MT_EE_DUAL_BAND,
MT_EE_5GHZ,
MT_EE_2GHZ,
MT_EE_DBDC,
};
enum mt7615_channel_group {
MT_CH_5G_JAPAN,
MT_CH_5G_UNII_1,
MT_CH_5G_UNII_2A,
MT_CH_5G_UNII_2B,
MT_CH_5G_UNII_2E_1,
MT_CH_5G_UNII_2E_2,
MT_CH_5G_UNII_2E_3,
MT_CH_5G_UNII_3,
__MT_CH_MAX
};
static inline enum mt7615_channel_group
mt7615_get_channel_group(int channel)
{
if (channel >= 184 && channel <= 196)
return MT_CH_5G_JAPAN;
if (channel <= 48)
return MT_CH_5G_UNII_1;
if (channel <= 64)
return MT_CH_5G_UNII_2A;
if (channel <= 114)
return MT_CH_5G_UNII_2E_1;
if (channel <= 144)
return MT_CH_5G_UNII_2E_2;
if (channel <= 161)
return MT_CH_5G_UNII_2E_3;
return MT_CH_5G_UNII_3;
}
static inline bool
mt7615_ext_pa_enabled(struct mt7615_dev *dev, enum nl80211_band band)
{
u8 *eep = dev->mt76.eeprom.data;
if (band == NL80211_BAND_5GHZ)
return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_5G);
else
return !(eep[MT_EE_NIC_CONF_1 + 1] & MT_EE_NIC_CONF_TSSI_2G);
}
#endif
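
As a worked example of the target-power lookup above, assuming a TSSI-enabled (internal PA) 5 GHz setup: channel 100 falls through the comparisons in mt7615_get_channel_group() to MT_CH_5G_UNII_2E_1 (group 4), so chain 1 resolves to MT_EE_TX1_5G_G0_TARGET_POWER + 5 * 4 = 0x098 + 0x14 = 0x0ac in mt7615_eeprom_get_power_index(). With an external PA the same call short-circuits to MT_EE_EXT_PA_5G_TARGET_POWER instead.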


@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include "mt7615.h"
#include "mac.h"
#include "eeprom.h"
static void mt7615_phy_init(struct mt7615_dev *dev)
{
@ -62,16 +63,11 @@ static void mt7615_mac_init(struct mt7615_dev *dev)
MT_AGG_ARCR_RATE_DOWN_RATIO_EN |
FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) |
FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4)));
dev->mt76.global_wcid.idx = MT7615_WTBL_RESERVED;
dev->mt76.global_wcid.hw_key_idx = -1;
rcu_assign_pointer(dev->mt76.wcid[MT7615_WTBL_RESERVED],
&dev->mt76.global_wcid);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
{
int ret;
int ret, idx;
mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
@ -98,6 +94,15 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
mt7615_mcu_ctrl_pm_state(dev, 0);
mt7615_mcu_del_wtbl_all(dev);
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1);
if (idx)
return -ENOSPC;
dev->mt76.global_wcid.idx = idx;
dev->mt76.global_wcid.hw_key_idx = -1;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
return 0;
}
@ -133,6 +138,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
{
.max = MT7615_MAX_INTERFACES,
.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_STATION)
}
};
@ -158,6 +166,48 @@ static int mt7615_init_debugfs(struct mt7615_dev *dev)
return 0;
}
static void
mt7615_init_txpower(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband)
{
int i, n_chains = hweight8(dev->mt76.antenna_mask), target_chains;
u8 *eep = (u8 *)dev->mt76.eeprom.data;
enum nl80211_band band = sband->band;
target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < sband->n_channels; i++) {
struct ieee80211_channel *chan = &sband->channels[i];
u8 target_power = 0;
int j;
for (j = 0; j < target_chains; j++) {
int index;
index = mt7615_eeprom_get_power_index(dev, chan, j);
target_power = max(target_power, eep[index]);
}
target_power = DIV_ROUND_UP(target_power, 2);
switch (n_chains) {
case 4:
target_power += 6;
break;
case 3:
target_power += 4;
break;
case 2:
target_power += 3;
break;
default:
break;
}
chan->max_power = min_t(int, chan->max_reg_power,
target_power);
chan->orig_mpwr = target_power;
}
}
int mt7615_register_device(struct mt7615_dev *dev)
{
struct ieee80211_hw *hw = mt76_hw(dev);
@ -195,6 +245,9 @@ int mt7615_register_device(struct mt7615_dev *dev)
dev->mt76.antenna_mask = 0xf;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_AP);
ret = mt76_register_device(&dev->mt76, true, mt7615_rates,
@ -202,6 +255,9 @@ int mt7615_register_device(struct mt7615_dev *dev)
if (ret)
return ret;
mt7615_init_txpower(dev, &dev->mt76.sband_2g.sband);
mt7615_init_txpower(dev, &dev->mt76.sband_5g.sband);
hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM;
return mt7615_init_debugfs(dev);
@ -212,6 +268,10 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
struct mt76_txwi_cache *txwi;
int id;
mt76_unregister_device(&dev->mt76);
mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
spin_lock_bh(&dev->token_lock);
idr_for_each_entry(&dev->token, txwi, id) {
mt7615_txp_skb_unmap(&dev->mt76, txwi);
@ -221,9 +281,6 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
}
spin_unlock_bh(&dev->token_lock);
idr_destroy(&dev->token);
mt76_unregister_device(&dev->mt76);
mt7615_mcu_exit(dev);
mt7615_dma_cleanup(dev);
ieee80211_free_hw(mt76_hw(dev));
mt76_free_device(&dev->mt76);
}


@ -13,6 +13,11 @@
#include "../dma.h"
#include "mac.h"
static inline s8 to_rssi(u32 field, u32 rxv)
{
return (FIELD_GET(field, rxv) - 220) / 2;
}
static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
u8 idx, bool unicast)
{
@ -36,54 +41,6 @@ static struct mt76_wcid *mt7615_rx_get_wcid(struct mt7615_dev *dev,
return &sta->vif->sta.wcid;
}
static int mt7615_get_rate(struct mt7615_dev *dev,
struct ieee80211_supported_band *sband,
int idx, bool cck)
{
int offset = 0;
int len = sband->n_bitrates;
int i;
if (cck) {
if (sband == &dev->mt76.sband_5g.sband)
return 0;
idx &= ~BIT(2); /* short preamble */
} else if (sband == &dev->mt76.sband_2g.sband) {
offset = 4;
}
for (i = offset; i < len; i++) {
if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
return i;
}
return 0;
}
static void mt7615_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
u8 *pn = status->iv;
u8 *hdr;
__skb_push(skb, 8);
memmove(skb->data, skb->data + 8, hdr_len);
hdr = skb->data + hdr_len;
hdr[0] = pn[5];
hdr[1] = pn[4];
hdr[2] = 0;
hdr[3] = 0x20 | (key_id << 6);
hdr[4] = pn[3];
hdr[5] = pn[2];
hdr[6] = pn[1];
hdr[7] = pn[0];
status->flag &= ~RX_FLAG_IV_STRIPPED;
}
int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@ -96,6 +53,9 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
bool unicast, remove_pad, insert_ccmp_hdr = false;
int i, idx;
if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state))
return -EINVAL;
memset(status, 0, sizeof(*status));
unicast = (rxd1 & MT_RXD1_NORMAL_ADDR_TYPE) == MT_RXD1_NORMAL_U2M;
@ -165,6 +125,7 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
u32 rxdg0 = le32_to_cpu(rxd[0]);
u32 rxdg1 = le32_to_cpu(rxd[1]);
u32 rxdg3 = le32_to_cpu(rxd[3]);
u8 stbc = FIELD_GET(MT_RXV1_HT_STBC, rxdg0);
bool cck = false;
@ -174,7 +135,7 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
cck = true;
/* fall through */
case MT_PHY_TYPE_OFDM:
i = mt7615_get_rate(dev, sband, i, cck);
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
case MT_PHY_TYPE_HT_GF:
case MT_PHY_TYPE_HT:
@ -214,7 +175,21 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
/* TODO: RSSI */
status->chains = dev->mt76.antenna_mask;
status->chain_signal[0] = to_rssi(MT_RXV4_RCPI0, rxdg3);
status->chain_signal[1] = to_rssi(MT_RXV4_RCPI1, rxdg3);
status->chain_signal[2] = to_rssi(MT_RXV4_RCPI2, rxdg3);
status->chain_signal[3] = to_rssi(MT_RXV4_RCPI3, rxdg3);
status->signal = status->chain_signal[0];
for (i = 1; i < hweight8(dev->mt76.antenna_mask); i++) {
if (!(status->chains & BIT(i)))
continue;
status->signal = max(status->signal,
status->chain_signal[i]);
}
rxd += 6;
if ((u8 *)rxd - skb->data >= skb->len)
return -EINVAL;
@ -225,7 +200,7 @@ int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
if (insert_ccmp_hdr) {
u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
mt7615_insert_ccmp_hdr(skb, key_id);
mt76_insert_ccmp_hdr(skb, key_id);
}
hdr = (struct ieee80211_hdr *)skb->data;
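
In the per-chain signal reporting added above, to_rssi() maps each MT_RXV4_RCPI* field to dBm as (rcpi - 220) / 2, matching the common RCPI convention of dBm = rcpi / 2 - 110; an RCPI value of 100, for instance, becomes -60 dBm. status->signal is then taken as the maximum of the chain signals enabled in the antenna mask.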
@ -549,23 +524,20 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
{
struct ieee80211_supported_band *sband;
int i, idx, count, final_idx = 0;
bool fixed_rate, final_mpdu, ack_timeout;
bool fixed_rate, ack_timeout;
bool probe, ampdu, cck = false;
u32 final_rate, final_rate_flags, final_nss, txs;
u8 pid;
fixed_rate = info->status.rates[0].count;
probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
txs = le32_to_cpu(txs_data[1]);
final_mpdu = txs & MT_TXS1_ACKED_MPDU;
ampdu = !fixed_rate && (txs & MT_TXS1_AMPDU);
txs = le32_to_cpu(txs_data[3]);
count = FIELD_GET(MT_TXS3_TX_COUNT, txs);
txs = le32_to_cpu(txs_data[0]);
pid = FIELD_GET(MT_TXS0_PID, txs);
final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;
@ -628,7 +600,8 @@ static bool mt7615_fill_txs(struct mt7615_dev *dev, struct mt7615_sta *sta,
else
sband = &dev->mt76.sband_2g.sband;
final_rate &= MT_TX_RATE_IDX;
final_rate = mt7615_get_rate(dev, sband, final_rate, cck);
final_rate = mt76_get_rate(&dev->mt76, sband, final_rate,
cck);
final_rate_flags = 0;
break;
case MT_PHY_TYPE_HT_GF:


@ -98,6 +98,11 @@ enum rx_pkt_type {
#define MT_RXV2_GROUP_ID GENMASK(26, 21)
#define MT_RXV2_LENGTH GENMASK(20, 0)
#define MT_RXV4_RCPI3 GENMASK(31, 24)
#define MT_RXV4_RCPI2 GENMASK(23, 16)
#define MT_RXV4_RCPI1 GENMASK(15, 8)
#define MT_RXV4_RCPI0 GENMASK(7, 0)
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,


@ -37,6 +37,7 @@ static int get_omac_idx(enum nl80211_iftype type, u32 mask)
switch (type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
/* ap use hw bssid 0 and ext bssid */
if (~mask & BIT(HW_BSSID_0))
return HW_BSSID_0;
@ -77,11 +78,12 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
goto out;
}
mvif->omac_idx = get_omac_idx(vif->type, dev->omac_mask);
if (mvif->omac_idx < 0) {
idx = get_omac_idx(vif->type, dev->omac_mask);
if (idx < 0) {
ret = -ENOSPC;
goto out;
}
mvif->omac_idx = idx;
/* TODO: DBDC support. Use band 0 and wmm 0 for now */
mvif->band_idx = 0;
@ -93,7 +95,7 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
dev->vif_mask |= BIT(mvif->idx);
dev->omac_mask |= BIT(mvif->omac_idx);
idx = MT7615_WTBL_RESERVED - 1 - mvif->idx;
idx = MT7615_WTBL_RESERVED - mvif->idx;
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
@ -128,8 +130,7 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
static int mt7615_set_channel(struct mt7615_dev *dev,
struct cfg80211_chan_def *def)
static int mt7615_set_channel(struct mt7615_dev *dev)
{
int ret;
@ -190,28 +191,28 @@ static int mt7615_config(struct ieee80211_hw *hw, u32 changed)
struct mt7615_dev *dev = hw->priv;
int ret = 0;
mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
mutex_lock(&dev->mt76.mutex);
ieee80211_stop_queues(hw);
ret = mt7615_set_channel(dev, &hw->conf.chandef);
ret = mt7615_set_channel(dev);
ieee80211_wake_queues(hw);
mutex_unlock(&dev->mt76.mutex);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
mutex_lock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_POWER)
ret = mt7615_mcu_set_tx_power(dev);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
dev->mt76.rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
else
dev->mt76.rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
mt76_wr(dev, MT_WF_RFCR, dev->mt76.rxfilter);
mutex_unlock(&dev->mt76.mutex);
}
mutex_unlock(&dev->mt76.mutex);
return ret;
}
@ -281,26 +282,18 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&dev->mt76.mutex);
/* TODO: sta mode connect/disconnect
* BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID
*/
if (changed & BSS_CHANGED_ASSOC)
mt7615_mcu_set_bss_info(dev, vif, info->assoc);
/* TODO: update beacon content
* BSS_CHANGED_BEACON
*/
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (info->enable_beacon) {
mt7615_mcu_set_bss_info(dev, vif, 1);
mt7615_mcu_add_wtbl_bmc(dev, vif);
mt7615_mcu_set_sta_rec_bmc(dev, vif, 1);
mt7615_mcu_set_bcn(dev, vif, 1);
} else {
mt7615_mcu_set_sta_rec_bmc(dev, vif, 0);
mt7615_mcu_del_wtbl_bmc(dev, vif);
mt7615_mcu_set_bss_info(dev, vif, 0);
mt7615_mcu_set_bcn(dev, vif, 0);
}
mt7615_mcu_set_bss_info(dev, vif, info->enable_beacon);
mt7615_mcu_wtbl_bmc(dev, vif, info->enable_beacon);
mt7615_mcu_set_sta_rec_bmc(dev, vif, info->enable_beacon);
mt7615_mcu_set_bcn(dev, vif, info->enable_beacon);
}
mutex_unlock(&dev->mt76.mutex);
@ -343,7 +336,7 @@ void mt7615_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mt7615_mcu_set_sta_rec(dev, vif, sta, 0);
mt7615_mcu_del_wtbl(dev, vif, sta);
mt7615_mcu_del_wtbl(dev, sta);
}
static void mt7615_sta_rate_tbl_update(struct ieee80211_hw *hw,
@ -496,4 +489,5 @@ const struct ieee80211_ops mt7615_ops = {
.sw_scan_start = mt7615_sw_scan,
.sw_scan_complete = mt7615_sw_scan_complete,
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
};

[File diff suppressed because it is too large]


@ -70,6 +70,7 @@ enum {
enum {
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
MCU_EXT_CMD_STA_REC_UPDATE = 0x25,
MCU_EXT_CMD_BSS_INFO_UPDATE = 0x26,
@ -105,25 +106,19 @@ enum {
#define STA_TYPE_STA BIT(0)
#define STA_TYPE_AP BIT(1)
#define STA_TYPE_ADHOC BIT(2)
#define STA_TYPE_TDLS BIT(3)
#define STA_TYPE_WDS BIT(4)
#define STA_TYPE_BC BIT(5)
#define NETWORK_INFRA BIT(16)
#define NETWORK_P2P BIT(17)
#define NETWORK_IBSS BIT(18)
#define NETWORK_MESH BIT(19)
#define NETWORK_BOW BIT(20)
#define NETWORK_WDS BIT(21)
#define CONNECTION_INFRA_STA (STA_TYPE_STA | NETWORK_INFRA)
#define CONNECTION_INFRA_AP (STA_TYPE_AP | NETWORK_INFRA)
#define CONNECTION_P2P_GC (STA_TYPE_STA | NETWORK_P2P)
#define CONNECTION_P2P_GO (STA_TYPE_AP | NETWORK_P2P)
#define CONNECTION_MESH_STA (STA_TYPE_STA | NETWORK_MESH)
#define CONNECTION_MESH_AP (STA_TYPE_AP | NETWORK_MESH)
#define CONNECTION_IBSS_ADHOC (STA_TYPE_ADHOC | NETWORK_IBSS)
#define CONNECTION_TDLS (STA_TYPE_STA | NETWORK_INFRA | STA_TYPE_TDLS)
#define CONNECTION_WDS (STA_TYPE_WDS | NETWORK_WDS)
#define CONNECTION_INFRA_BC (STA_TYPE_BC | NETWORK_INFRA)
@ -131,41 +126,11 @@ enum {
#define CONN_STATE_CONNECT 1
#define CONN_STATE_PORT_SECURE 2
struct dev_info {
u8 omac_idx;
u8 omac_addr[ETH_ALEN];
u8 band_idx;
u8 enable;
u32 feature;
};
enum {
DEV_INFO_ACTIVE,
DEV_INFO_MAX_NUM
};
struct bss_info {
u8 bss_idx;
u8 bssid[ETH_ALEN];
u8 omac_idx;
u8 band_idx;
u8 bmc_tx_wlan_idx; /* for bmc tx (sta mode use uc entry) */
u8 wmm_idx;
u32 network_type;
u32 conn_type;
u16 bcn_interval;
u8 dtim_period;
u8 enable;
u32 feature;
};
struct bss_info_tag_handler {
u32 tag;
u32 len;
void (*handler)(struct mt7615_dev *dev,
struct bss_info *bss_info, struct sk_buff *skb);
};
struct bss_info_omac {
__le16 tag;
__le16 len;
@ -231,6 +196,13 @@ enum {
WTBL_RESET_ALL
};
struct wtbl_req_hdr {
u8 wlan_idx;
u8 operation;
__le16 tlv_num;
u8 rsv[4];
} __packed;
struct wtbl_generic {
__le16 tag;
__le16 len;
@ -396,7 +368,8 @@ struct wtbl_raw {
__le32 val;
} __packed;
#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_generic) + \
#define MT7615_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
sizeof(struct wtbl_generic) + \
sizeof(struct wtbl_rx) + \
sizeof(struct wtbl_ht) + \
sizeof(struct wtbl_vht) + \
@ -430,6 +403,15 @@ enum {
WTBL_MAX_NUM
};
struct sta_req_hdr {
u8 bss_idx;
u8 wlan_idx;
__le16 tlv_num;
u8 is_tlv_append;
u8 muar_idx;
u8 rsv[2];
} __packed;
struct sta_rec_basic {
__le16 tag;
__le16 len;


@ -105,11 +105,14 @@ u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
int mt7615_eeprom_init(struct mt7615_dev *dev);
int mt7615_eeprom_get_power_index(struct mt7615_dev *dev,
struct ieee80211_channel *chan,
u8 chain_idx);
int mt7615_dma_init(struct mt7615_dev *dev);
void mt7615_dma_cleanup(struct mt7615_dev *dev);
int mt7615_mcu_init(struct mt7615_dev *dev);
int mt7615_mcu_set_dev_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
int en);
int mt7615_mcu_set_dev_info(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool enable);
int mt7615_mcu_set_bss_info(struct mt7615_dev *dev, struct ieee80211_vif *vif,
int en);
int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
@ -118,12 +121,11 @@ int mt7615_mcu_set_wtbl_key(struct mt7615_dev *dev, int wcid,
void mt7615_mcu_set_rates(struct mt7615_dev *dev, struct mt7615_sta *sta,
struct ieee80211_tx_rate *probe_rate,
struct ieee80211_tx_rate *rates);
int mt7615_mcu_add_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
int mt7615_mcu_del_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif);
int mt7615_mcu_wtbl_bmc(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7615_mcu_add_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int mt7615_mcu_del_wtbl(struct mt7615_dev *dev, struct ieee80211_sta *sta);
int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev);
int mt7615_mcu_set_sta_rec_bmc(struct mt7615_dev *dev,
struct ieee80211_vif *vif, bool en);
@ -168,6 +170,7 @@ int mt7615_mcu_set_eeprom(struct mt7615_dev *dev);
int mt7615_mcu_init_mac(struct mt7615_dev *dev);
int mt7615_mcu_set_rts_thresh(struct mt7615_dev *dev, u32 val);
int mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int enter);
int mt7615_mcu_set_tx_power(struct mt7615_dev *dev);
void mt7615_mcu_exit(struct mt7615_dev *dev);
int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
@ -180,7 +183,6 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7615_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7615_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);


@ -27,14 +27,15 @@ u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
return MT_PCIE_REMAP_BASE_2 + offset;
}
void mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
static void
mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
}
irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
{
struct mt7615_dev *dev = dev_instance;
u32 intr;
@ -49,7 +50,7 @@ irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
if (intr & MT_INT_TX_DONE_ALL) {
mt7615_irq_disable(dev, MT_INT_TX_DONE_ALL);
tasklet_schedule(&dev->mt76.tx_tasklet);
napi_schedule(&dev->mt76.tx_napi);
}
if (intr & MT_INT_RX_DONE(0)) {


@ -271,8 +271,9 @@ mt76x0_init_txpower(struct mt76x02_dev *dev,
mt76x0_get_tx_power_per_rate(dev, chan, &t);
mt76x0_get_power_info(dev, chan, &tp);
chan->max_power = (mt76x02_get_max_rate_power(&t) + tp) / 2;
chan->orig_mpwr = chan->max_power;
chan->orig_mpwr = (mt76x02_get_max_rate_power(&t) + tp) / 2;
chan->max_power = min_t(int, chan->max_reg_power,
chan->orig_mpwr);
}
}


@ -25,7 +25,7 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76_rr(dev, MT_CH_IDLE);
mt76_rr(dev, MT_CH_BUSY);
mt76x02_edcca_init(dev, true);
mt76x02_edcca_init(dev);
if (mt76_is_mmio(dev)) {
mt76x02_dfs_init_params(dev);


@ -422,15 +422,15 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band)
static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
{
u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA);
u16 ee_cfg1 = mt76x02_eeprom_get(dev, MT_EE_CFG1_INIT);
u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2);
u32 wlan, coex3, cmb;
u32 wlan, coex3;
bool ant_div;
wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL);
cmb = mt76_rr(dev, MT_CMB_CTRL);
coex3 = mt76_rr(dev, MT_COEXCFG3);
cmb &= ~(BIT(14) | BIT(12));
ee_ant &= ~(BIT(14) | BIT(12));
wlan &= ~(BIT(6) | BIT(5));
coex3 &= ~GENMASK(5, 2);
@ -439,7 +439,7 @@ static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) &&
(nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV);
if (ant_div)
cmb |= BIT(12);
ee_ant |= BIT(12);
else
coex3 |= BIT(4);
coex3 |= BIT(3);
@ -456,10 +456,11 @@ static void mt76x0_phy_ant_select(struct mt76x02_dev *dev)
}
if (is_mt7630(dev))
cmb |= BIT(14) | BIT(11);
ee_ant |= BIT(14) | BIT(11);
mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan);
mt76_wr(dev, MT_CMB_CTRL, cmb);
mt76_rmw(dev, MT_CMB_CTRL, GENMASK(15, 0), ee_ant);
mt76_rmw(dev, MT_CSR_EE_CFG1, GENMASK(15, 0), ee_cfg1);
mt76_clear(dev, MT_COEXCFG0, BIT(2));
mt76_wr(dev, MT_COEXCFG3, coex3);
}


@ -183,7 +183,7 @@ static int mt76x0u_register_device(struct mt76x02_dev *dev)
/* check hw sg support in order to enable AMSDU */
if (dev->mt76.usb.sg_en)
hw->max_tx_fragments = MT_SG_MAX_SIZE;
hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;


@ -90,7 +90,6 @@ struct mt76x02_dev {
struct sk_buff *rx_head;
struct napi_struct tx_napi;
struct delayed_work cal_work;
struct delayed_work wdt_work;


@ -189,10 +189,8 @@ mt76x02_resync_beacon_timer(struct mt76x02_dev *dev)
mt76_rmw_field(dev, MT_BEACON_TIME_CFG,
MT_BEACON_TIME_CFG_INTVAL, timer_val);
if (dev->tbtt_count >= 64) {
if (dev->tbtt_count >= 64)
dev->tbtt_count = 0;
return;
}
}
EXPORT_SYMBOL_GPL(mt76x02_resync_beacon_timer);


@ -120,12 +120,16 @@ static int
mt76_edcca_set(void *data, u64 val)
{
struct mt76x02_dev *dev = data;
enum nl80211_dfs_regions region = dev->dfs_pd.region;
enum nl80211_dfs_regions region = dev->mt76.region;
mutex_lock(&dev->mt76.mutex);
dev->ed_monitor_enabled = !!val;
dev->ed_monitor = dev->ed_monitor_enabled &&
region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev, true);
mt76x02_edcca_init(dev);
mutex_unlock(&dev->mt76.mutex);
return 0;
}
@ -153,7 +157,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,


@ -283,7 +283,7 @@ static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev,
if (!pulse->period || !pulse->w1)
return false;
switch (dev->dfs_pd.region) {
switch (dev->mt76.region) {
case NL80211_DFS_FCC:
if (pulse->engine > 3)
break;
@ -457,7 +457,7 @@ static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
with_sum = event->width + cur_event->width;
sw_params = &dfs_pd->sw_dpd_params;
switch (dev->dfs_pd.region) {
switch (dev->mt76.region) {
case NL80211_DFS_FCC:
case NL80211_DFS_JP:
if (with_sum < 600)
@ -685,7 +685,7 @@ static void mt76x02_dfs_init_sw_detector(struct mt76x02_dev *dev)
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
switch (dev->dfs_pd.region) {
switch (dev->mt76.region) {
case NL80211_DFS_FCC:
dfs_pd->sw_dpd_params.max_pri = MT_DFS_FCC_MAX_PRI;
dfs_pd->sw_dpd_params.min_pri = MT_DFS_FCC_MIN_PRI;
@ -725,7 +725,7 @@ static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev)
break;
}
switch (dev->dfs_pd.region) {
switch (dev->mt76.region) {
case NL80211_DFS_FCC:
radar_specs = &fcc_radar_specs[shift];
break;
@ -836,7 +836,7 @@ void mt76x02_dfs_init_params(struct mt76x02_dev *dev)
struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
dev->dfs_pd.region != NL80211_DFS_UNSET) {
dev->mt76.region != NL80211_DFS_UNSET) {
mt76x02_dfs_init_sw_detector(dev);
mt76x02_dfs_set_bbp_params(dev);
/* enable debug mode */
@ -869,7 +869,7 @@ void mt76x02_dfs_init_detector(struct mt76x02_dev *dev)
INIT_LIST_HEAD(&dfs_pd->sequences);
INIT_LIST_HEAD(&dfs_pd->seq_pool);
dfs_pd->region = NL80211_DFS_UNSET;
dev->mt76.region = NL80211_DFS_UNSET;
dfs_pd->last_sw_check = jiffies;
tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet,
(unsigned long)dev);
@ -882,14 +882,14 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
mutex_lock(&dev->mt76.mutex);
if (dfs_pd->region != region) {
if (dev->mt76.region != region) {
tasklet_disable(&dfs_pd->dfs_tasklet);
dev->ed_monitor = dev->ed_monitor_enabled &&
region == NL80211_DFS_ETSI;
mt76x02_edcca_init(dev, true);
mt76x02_edcca_init(dev);
dfs_pd->region = region;
dev->mt76.region = region;
mt76x02_dfs_init_params(dev);
tasklet_enable(&dfs_pd->dfs_tasklet);
}


@ -118,8 +118,6 @@ struct mt76x02_dfs_seq_stats {
};
struct mt76x02_dfs_pattern_detector {
enum nl80211_dfs_regions region;
u8 chirp_pulse_cnt;
u32 chirp_pulse_ts;


@ -26,6 +26,7 @@ enum mt76x02_eeprom_field {
MT_EE_MAC_ADDR = 0x004,
MT_EE_PCI_ID = 0x00A,
MT_EE_ANTENNA = 0x022,
MT_EE_CFG1_INIT = 0x024,
MT_EE_NIC_CONF_0 = 0x034,
MT_EE_NIC_CONF_1 = 0x036,
MT_EE_COUNTRY_REGION_5GHZ = 0x038,


@ -420,30 +420,92 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
EXPORT_SYMBOL_GPL(mt76x02_mac_write_txwi);
static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev,
mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
{
u8 mcs, nss;
if (!idx)
return;
rates += idx - 1;
rates[1] = rates[0];
switch (phy) {
case MT_PHY_TYPE_VHT:
mcs = ieee80211_rate_get_vht_mcs(rates);
nss = ieee80211_rate_get_vht_nss(rates);
if (mcs == 0)
nss = max_t(int, nss - 1, 1);
else
mcs--;
ieee80211_rate_set_vht(rates + 1, mcs, nss);
break;
case MT_PHY_TYPE_HT_GF:
case MT_PHY_TYPE_HT:
/* MCS 8 falls back to MCS 0 */
if (rates[0].idx == 8) {
rates[1].idx = 0;
break;
}
/* fall through */
default:
rates[1].idx = max_t(int, rates[0].idx - 1, 0);
break;
}
}
static void
mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, struct mt76x02_sta *msta,
struct ieee80211_tx_info *info,
struct mt76x02_tx_status *st, int n_frames)
{
struct ieee80211_tx_rate *rate = info->status.rates;
int cur_idx, last_rate;
struct ieee80211_tx_rate last_rate;
u16 first_rate;
int retry = st->retry;
int phy;
int i;
if (!n_frames)
return;
last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1);
mt76x02_mac_process_tx_rate(&rate[last_rate], st->rate,
dev->mt76.chandef.chan->band);
if (last_rate < IEEE80211_TX_MAX_RATES - 1)
rate[last_rate + 1].idx = -1;
phy = FIELD_GET(MT_RXWI_RATE_PHY, st->rate);
cur_idx = rate[last_rate].idx + last_rate;
for (i = 0; i <= last_rate; i++) {
rate[i].flags = rate[last_rate].flags;
rate[i].idx = max_t(int, 0, cur_idx - i);
rate[i].count = 1;
if (st->pktid & MT_PACKET_ID_HAS_RATE) {
first_rate = st->rate & ~MT_RXWI_RATE_INDEX;
first_rate |= st->pktid & MT_RXWI_RATE_INDEX;
mt76x02_mac_process_tx_rate(&rate[0], first_rate,
dev->mt76.chandef.chan->band);
} else if (rate[0].idx < 0) {
if (!msta)
return;
mt76x02_mac_process_tx_rate(&rate[0], msta->wcid.tx_info,
dev->mt76.chandef.chan->band);
}
mt76x02_mac_process_tx_rate(&last_rate, st->rate,
dev->mt76.chandef.chan->band);
for (i = 0; i < ARRAY_SIZE(info->status.rates); i++) {
retry--;
if (i + 1 == ARRAY_SIZE(info->status.rates)) {
info->status.rates[i] = last_rate;
info->status.rates[i].count = max_t(int, retry, 1);
break;
}
mt76x02_tx_rate_fallback(info->status.rates, i, phy);
if (info->status.rates[i].idx == last_rate.idx)
break;
}
if (i + 1 < ARRAY_SIZE(info->status.rates)) {
info->status.rates[i + 1].idx = -1;
info->status.rates[i + 1].count = 0;
}
rate[last_rate].count = st->retry + 1 - last_rate;
info->status.ampdu_len = n_frames;
info->status.ampdu_ack_len = st->success ? n_frames : 0;
@ -489,13 +551,19 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
mt76_tx_status_lock(mdev, &list);
if (wcid) {
if (stat->pktid >= MT_PACKET_ID_FIRST)
if (mt76_is_skb_pktid(stat->pktid))
status.skb = mt76_tx_status_skb_get(mdev, wcid,
stat->pktid, &list);
if (status.skb)
status.info = IEEE80211_SKB_CB(status.skb);
}
if (!status.skb && !(stat->pktid & MT_PACKET_ID_HAS_RATE)) {
mt76_tx_status_unlock(mdev, &list);
rcu_read_unlock();
return;
}
if (msta && stat->aggr && !status.skb) {
u32 stat_val, stat_cache;
@ -512,14 +580,14 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
return;
}
mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
msta->n_frames);
mt76x02_mac_fill_tx_status(dev, msta, status.info,
&msta->status, msta->n_frames);
msta->status = *stat;
msta->n_frames = 1;
*update = 0;
} else {
mt76x02_mac_fill_tx_status(dev, status.info, stat, 1);
mt76x02_mac_fill_tx_status(dev, msta, status.info, stat, 1);
*update = 1;
}
@ -945,12 +1013,12 @@ mt76x02_edcca_tx_enable(struct mt76x02_dev *dev, bool enable)
dev->ed_tx_blocked = !enable;
}
void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
void mt76x02_edcca_init(struct mt76x02_dev *dev)
{
dev->ed_trigger = 0;
dev->ed_silent = 0;
if (dev->ed_monitor && enable) {
if (dev->ed_monitor) {
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u8 ed_th = chan->band == NL80211_BAND_5GHZ ? 0x0e : 0x20;


@ -209,5 +209,5 @@ int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx,
void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev,
struct ieee80211_vif *vif, bool val);
void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable);
void mt76x02_edcca_init(struct mt76x02_dev *dev);
#endif


@ -166,7 +166,8 @@ static void mt76x02_tx_tasklet(unsigned long data)
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev, tx_napi);
struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
mt76.tx_napi);
int i;
mt76x02_mac_poll_tx_status(dev, false);
@ -245,9 +246,9 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
netif_tx_napi_add(&dev->mt76.napi_dev, &dev->tx_napi, mt76x02_poll_tx,
NAPI_POLL_WEIGHT);
napi_enable(&dev->tx_napi);
netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
mt76x02_poll_tx, NAPI_POLL_WEIGHT);
napi_enable(&dev->mt76.tx_napi);
return 0;
}
@ -303,7 +304,7 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
napi_schedule(&dev->tx_napi);
napi_schedule(&dev->mt76.tx_napi);
}
if (intr & MT_INT_GPTIMER) {
@@ -334,7 +335,6 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
{
tasklet_kill(&dev->mt76.tx_tasklet);
netif_napi_del(&dev->tx_napi);
mt76_dma_cleanup(&dev->mt76);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
@@ -454,7 +454,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->mt76.tx_tasklet);
napi_disable(&dev->tx_napi);
napi_disable(&dev->mt76.tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
napi_disable(&dev->mt76.napi[i]);
@@ -508,8 +508,8 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
clear_bit(MT76_RESET, &dev->mt76.state);
tasklet_enable(&dev->mt76.tx_tasklet);
napi_enable(&dev->tx_napi);
napi_schedule(&dev->tx_napi);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

View File

@@ -66,6 +66,9 @@
#define MT_WLAN_FUN_CTRL_GPIO_OUT GENMASK(23, 16) /* MT76x0 */
#define MT_WLAN_FUN_CTRL_GPIO_OUT_EN GENMASK(31, 24) /* MT76x0 */
/* MT76x0 */
#define MT_CSR_EE_CFG1 0x0104
#define MT_XO_CTRL0 0x0100
#define MT_XO_CTRL1 0x0104
#define MT_XO_CTRL2 0x0108

View File

@@ -154,6 +154,7 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
struct mt76x02_txwi *txwi = txwi_ptr;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
int hdrlen, len, pid, qsel = MT_QSEL_EDCA;
if (qid == MT_TXQ_PSD && wcid && wcid->idx < 128)
@@ -164,9 +165,15 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt76x02_mac_write_txwi(dev, txwi, tx_info->skb, wcid, sta, len);
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
/* encode packet rate for no-skb packet id to fix up status reporting */
if (pid == MT_PACKET_ID_NO_SKB)
pid = MT_PACKET_ID_HAS_RATE |
(le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
txwi->pktid = pid;
if (pid >= MT_PACKET_ID_FIRST)
if (mt76_is_skb_pktid(pid) && ampdu)
qsel = MT_QSEL_MGMT;
tx_info->info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) |

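For frames that are not tracked with a status skb, the hunk above folds the tx rate index into the packet id (MT_PACKET_ID_HAS_RATE) so the status path can still report the first rate. A self-contained sketch of that kind of encoding; the flag and mask values below are illustrative assumptions, not the driver's MT_PACKET_ID_HAS_RATE / MT_RXWI_RATE_INDEX definitions:

#include <stdint.h>
#include <stdio.h>

/* illustrative bit layout: bit 7 flags "pktid carries a rate", bits 0-3 hold the rate index */
#define PKTID_HAS_RATE   0x80
#define RATE_INDEX_MASK  0x0f

static uint8_t encode_pktid_rate(uint16_t txwi_rate)
{
	/* no skb is being tracked, so stash the rate index in the pktid */
	return PKTID_HAS_RATE | (txwi_rate & RATE_INDEX_MASK);
}

static int pktid_to_rate(uint8_t pktid, uint16_t hw_rate)
{
	if (!(pktid & PKTID_HAS_RATE))
		return -1;	/* pktid refers to a tracked skb instead */

	/* rebuild the first rate: hardware flags plus the stashed index */
	return (hw_rate & ~RATE_INDEX_MASK) | (pktid & RATE_INDEX_MASK);
}

int main(void)
{
	uint8_t pktid = encode_pktid_rate(0x4005);	/* e.g. a mode flag plus index 5 */

	printf("pktid = 0x%02x, first rate = 0x%04x\n",
	       pktid, pktid_to_rate(pktid, 0x4002));
	return 0;
}

The decode side mirrors what the status hunk does with st->pktid and st->rate when the flag is set.
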
View File

@@ -14,7 +14,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "mt76x02.h"
#include "mt76x02_usb.h"
static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
{
@@ -79,6 +79,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
u32 flags;
@@ -89,9 +90,15 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
skb_push(tx_info->skb, sizeof(*txwi));
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
/* encode packet rate for no-skb packet id to fix up status reporting */
if (pid == MT_PACKET_ID_NO_SKB)
pid = MT_PACKET_ID_HAS_RATE |
(le16_to_cpu(txwi->rate) & MT_RXWI_RATE_INDEX);
txwi->pktid = pid;
if (pid >= MT_PACKET_ID_FIRST || ep == MT_EP_OUT_HCCA)
if ((mt76_is_skb_pktid(pid) && ampdu) || ep == MT_EP_OUT_HCCA)
qsel = MT_QSEL_MGMT;
else
qsel = MT_QSEL_EDCA;

View File

@@ -173,13 +173,14 @@ void mt76x2_init_txpower(struct mt76x02_dev *dev,
mt76x2_get_power_info(dev, &txp, chan);
mt76x2_get_rate_power(dev, &t, chan);
chan->max_power = mt76x02_get_max_rate_power(&t) +
chan->orig_mpwr = mt76x02_get_max_rate_power(&t) +
txp.target_power;
chan->max_power = DIV_ROUND_UP(chan->max_power, 2);
chan->orig_mpwr = DIV_ROUND_UP(chan->orig_mpwr, 2);
/* convert to combined output power on 2x2 devices */
chan->max_power += 3;
chan->orig_mpwr = chan->max_power;
chan->orig_mpwr += 3;
chan->max_power = min_t(int, chan->max_reg_power,
chan->orig_mpwr);
}
}
EXPORT_SYMBOL_GPL(mt76x2_init_txpower);

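The +3 adjustment above accounts for two chains transmitting the same per-chain power, which roughly doubles the radiated power (10*log10(2) is about 3 dB); the unclamped value is kept in orig_mpwr while max_power is capped at the regulatory limit. A rough standalone check of that arithmetic (example numbers only, not values read from the EEPROM):

#include <math.h>
#include <stdio.h>

/* combined output power in dBm for n equal chains at per_chain_dbm each */
static double combined_dbm(double per_chain_dbm, int n_chains)
{
	return per_chain_dbm + 10.0 * log10((double)n_chains);
}

int main(void)
{
	double per_chain = 17.0;	/* example per-chain target power, dBm */
	double reg_limit = 20.0;	/* example regulatory maximum, dBm */
	double combined = combined_dbm(per_chain, 2);
	double max_power = combined < reg_limit ? combined : reg_limit;

	printf("combined: %.1f dBm (about +3 dB), clamped max_power: %.1f dBm\n",
	       combined, max_power);
	return 0;
}
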
View File

@@ -54,14 +54,14 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
int ret;
cancel_delayed_work_sync(&dev->cal_work);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &dev->mt76.state);
mt76_set_channel(&dev->mt76);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
mt76x2_mac_stop(dev, true);
ret = mt76x2_phy_set_channel(dev, chandef);
@@ -72,10 +72,12 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef)
mt76x02_dfs_init_params(dev);
mt76x2_mac_resume(dev);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
clear_bit(MT76_RESET, &dev->mt76.state);
mutex_unlock(&dev->mt76.mutex);
tasklet_enable(&dev->dfs_pd.dfs_tasklet);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt76_txq_schedule_all(&dev->mt76);
@@ -111,14 +113,14 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed)
}
}
mutex_unlock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
ret = mt76x2_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
mutex_unlock(&dev->mt76.mutex);
return ret;
}

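With mt76x2_set_channel() now taking dev->mt76.mutex itself, the config path above has to release the mutex before the channel switch; calling a helper that locks the same non-recursive mutex while still holding it would deadlock. A minimal pthread sketch of the corrected ordering (stand-in code, not the driver's locking primitives):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stands in for the channel-switch helper: it acquires the device mutex itself */
static void set_channel(void)
{
	pthread_mutex_lock(&dev_mutex);
	printf("channel switch under the device mutex\n");
	pthread_mutex_unlock(&dev_mutex);
}

/* stands in for the config path: do the mutex-protected updates first,
 * drop the lock, and only then call the helper that locks on its own */
static void config(int change_channel)
{
	pthread_mutex_lock(&dev_mutex);
	printf("update power/monitor settings\n");
	pthread_mutex_unlock(&dev_mutex);

	if (change_channel)
		set_channel();	/* would deadlock if called with dev_mutex still held */
}

int main(void)
{
	config(1);
	return 0;
}
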
View File

@@ -74,7 +74,7 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
mt76x2_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
mt76x02_edcca_init(dev, true);
mt76x02_edcca_init(dev);
dev->cal.channel_cal_done = true;
}
@@ -294,10 +294,16 @@ void mt76x2_phy_calibrate(struct work_struct *work)
struct mt76x02_dev *dev;
dev = container_of(work, struct mt76x02_dev, cal_work.work);
mutex_lock(&dev->mt76.mutex);
mt76x2_phy_channel_calibrate(dev, false);
mt76x2_phy_tssi_compensate(dev);
mt76x2_phy_temp_compensate(dev);
mt76x2_phy_update_channel_gain(dev);
mutex_unlock(&dev->mt76.mutex);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
}

View File

@@ -225,7 +225,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev)
/* check hw sg support in order to enable AMSDU */
if (dev->mt76.usb.sg_en)
hw->max_tx_fragments = MT_SG_MAX_SIZE;
hw->max_tx_fragments = MT_TX_SG_MAX_SIZE;
else
hw->max_tx_fragments = 1;

View File

@@ -48,22 +48,23 @@ mt76x2u_set_channel(struct mt76x02_dev *dev,
int err;
cancel_delayed_work_sync(&dev->cal_work);
dev->beacon_ops->pre_tbtt_enable(dev, false);
mutex_lock(&dev->mt76.mutex);
set_bit(MT76_RESET, &dev->mt76.state);
mt76_set_channel(&dev->mt76);
dev->beacon_ops->pre_tbtt_enable(dev, false);
mt76x2_mac_stop(dev, false);
err = mt76x2u_phy_set_channel(dev, chandef);
mt76x2_mac_resume(dev);
mt76x02_edcca_init(dev, true);
dev->beacon_ops->pre_tbtt_enable(dev, true);
clear_bit(MT76_RESET, &dev->mt76.state);
mutex_unlock(&dev->mt76.mutex);
dev->beacon_ops->pre_tbtt_enable(dev, true);
mt76_txq_schedule_all(&dev->mt76);
return err;
@@ -85,12 +86,6 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
err = mt76x2u_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
dev->mt76.txpower_conf = hw->conf.power_level * 2;
@@ -103,6 +98,12 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&dev->mt76.mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ieee80211_stop_queues(hw);
err = mt76x2u_set_channel(dev, &hw->conf.chandef);
ieee80211_wake_queues(hw);
}
return err;
}

View File

@@ -45,7 +45,7 @@ mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped)
if (!mac_stopped)
mt76x2_mac_resume(dev);
mt76x2_apply_gain_adj(dev);
mt76x02_edcca_init(dev, true);
mt76x02_edcca_init(dev);
dev->cal.channel_cal_done = true;
}
@@ -55,10 +55,15 @@ void mt76x2u_phy_calibrate(struct work_struct *work)
struct mt76x02_dev *dev;
dev = container_of(work, struct mt76x02_dev, cal_work.work);
mutex_lock(&dev->mt76.mutex);
mt76x2u_phy_channel_calibrate(dev, false);
mt76x2_phy_tssi_compensate(dev);
mt76x2_phy_update_channel_gain(dev);
mutex_unlock(&dev->mt76.mutex);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work,
MT_CALIBRATE_INTERVAL);
}

View File

@@ -267,12 +267,10 @@ mt76u_set_endpoints(struct usb_interface *intf,
if (usb_endpoint_is_bulk_in(ep_desc) &&
in_ep < __MT_EP_IN_MAX) {
usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
usb->in_max_packet = usb_endpoint_maxp(ep_desc);
in_ep++;
} else if (usb_endpoint_is_bulk_out(ep_desc) &&
out_ep < __MT_EP_OUT_MAX) {
usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
usb->out_max_packet = usb_endpoint_maxp(ep_desc);
out_ep++;
}
}
@@ -333,12 +331,13 @@ mt76u_refill_rx(struct mt76_dev *dev, struct urb *urb, int nsgs, gfp_t gfp)
}
static int
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
int sg_max_size)
{
unsigned int size = sizeof(struct urb);
if (dev->usb.sg_en)
size += MT_SG_MAX_SIZE * sizeof(struct scatterlist);
size += sg_max_size * sizeof(struct scatterlist);
e->urb = kzalloc(size, GFP_KERNEL);
if (!e->urb)
@@ -357,11 +356,12 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e)
{
int err;
err = mt76u_urb_alloc(dev, e);
err = mt76u_urb_alloc(dev, e, MT_RX_SG_MAX_SIZE);
if (err)
return err;
return mt76u_refill_rx(dev, e->urb, MT_SG_MAX_SIZE, GFP_KERNEL);
return mt76u_refill_rx(dev, e->urb, MT_RX_SG_MAX_SIZE,
GFP_KERNEL);
}
static void mt76u_urb_free(struct urb *urb)
@@ -577,8 +577,9 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
if (!q->entry)
return -ENOMEM;
q->buf_size = dev->usb.sg_en ? MT_RX_BUF_SIZE : PAGE_SIZE;
q->ndesc = MT_NUM_RX_ENTRIES;
q->buf_size = PAGE_SIZE;
for (i = 0; i < q->ndesc; i++) {
err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
if (err < 0)
@@ -735,7 +736,7 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
urb->transfer_buffer = skb->data;
return 0;
} else {
sg_init_table(urb->sg, MT_SG_MAX_SIZE);
sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
if (urb->num_sgs == 0)
return -ENOMEM;
@@ -829,7 +830,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
q->ndesc = MT_NUM_TX_ENTRIES;
for (j = 0; j < q->ndesc; j++) {
err = mt76u_urb_alloc(dev, &q->entry[j]);
err = mt76u_urb_alloc(dev, &q->entry[j],
MT_TX_SG_MAX_SIZE);
if (err < 0)
return err;
}
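
The usb.c hunks above adjust the rx buffer sizing and size each URB's scatterlist array at allocation time, splitting the old shared MT_SG_MAX_SIZE into separate rx and tx limits. A rough sketch of that allocation-sizing pattern with stand-in types and illustrative limits (not the kernel's struct urb/scatterlist, and not the driver's MT_RX_SG_MAX_SIZE / MT_TX_SG_MAX_SIZE values):

#include <stdio.h>
#include <stdlib.h>

/* stand-ins for the kernel structures, only to show the sizing math */
struct fake_urb { int num_sgs; };
struct fake_sg { void *addr; unsigned int len; };

/* allocate one block holding the urb plus room for up to sg_max_size
 * scatterlist entries when scatter-gather is enabled */
static struct fake_urb *urb_alloc(int sg_en, int sg_max_size)
{
	size_t size = sizeof(struct fake_urb);

	if (sg_en)
		size += (size_t)sg_max_size * sizeof(struct fake_sg);

	return calloc(1, size);
}

int main(void)
{
	enum { RX_SG_MAX = 32, TX_SG_MAX = 8 };	/* illustrative limits only */
	struct fake_urb *rx = urb_alloc(1, RX_SG_MAX);
	struct fake_urb *tx = urb_alloc(1, TX_SG_MAX);

	printf("rx urb block: %zu bytes, tx urb block: %zu bytes\n",
	       sizeof(struct fake_urb) + RX_SG_MAX * sizeof(struct fake_sg),
	       sizeof(struct fake_urb) + TX_SG_MAX * sizeof(struct fake_sg));
	free(rx);
	free(tx);
	return 0;
}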