wireless-drivers-next patches for 4.21

Merge tag 'wireless-drivers-next-for-davem-2018-11-30' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.21

First set of patches for 4.21. Most notable here is support for Quantenna's QSR1000/QSR2000 chipsets and more flexible ways to provide nvram files for brcmfmac.

Major changes:

brcmfmac

* add support for first trying to get a board specific nvram file
* add support for getting nvram contents from EFI variables

qtnfmac

* use single PCIe driver for all platforms and rename Kconfig option CONFIG_QTNFMAC_PEARL_PCIE to CONFIG_QTNFMAC_PCIE
* add support for QSR1000/QSR2000 (Topaz) family of chipsets

ath10k

* add support for WCN3990 firmware crash recovery
* add firmware memory dump support for QCA4019

wil6210

* add firmware error recovery while in AP mode

ath9k

* remove experimental notice from dynack feature

iwlwifi

* PCI IDs for some new 9000-series cards
* improve antenna usage on connection problems
* new firmware debugging infrastructure
* some more work on 802.11ax
* improve support for multiple RF modules with 22000 devices

cordic

* move cordic macros and defines to a public header file
* convert brcmsmac and b43 to fully use cordic library
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit ce01a56ba3
@@ -561,6 +561,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
.n_cipher_suites = 8,
.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
@@ -594,6 +595,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_NO_PS] = "no-ps",
[ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
[ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
[ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel",
};

static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@@ -2183,6 +2185,8 @@ static void ath10k_core_restart(struct work_struct *work)
if (ret)
ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
ret);

complete(&ar->driver_recovery);
}

static void ath10k_core_set_coverage_class_work(struct work_struct *work)
@@ -3046,6 +3050,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel);
init_completion(&ar->target_suspend);
init_completion(&ar->driver_recovery);
init_completion(&ar->wow.wakeup_completed);

init_completion(&ar->install_key_done);

@@ -474,6 +474,7 @@ struct ath10k_htt_data_stats {
u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM];
u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM];
u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM];
u64 rate_table[ATH10K_COUNTER_TYPE_MAX][ATH10K_RATE_TABLE_NUM];
};

struct ath10k_htt_tx_stats {
@@ -760,6 +761,9 @@ enum ath10k_fw_features {
/* Firmware load is done externally, not by bmi */
ATH10K_FW_FEATURE_NON_BMI = 19,

/* Firmware sends only one chan_info event per channel */
ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL = 20,

/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@@ -960,6 +964,7 @@ struct ath10k {
} hif;

struct completion target_suspend;
struct completion driver_recovery;

const struct ath10k_hw_regs *regs;
const struct ath10k_hw_ce_regs *hw_ce_regs;

@@ -867,9 +867,105 @@ static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = {
},
};

static const struct ath10k_mem_section ipq4019_soc_reg_range[] = {
{0x080000, 0x080004},
{0x080020, 0x080024},
{0x080028, 0x080050},
{0x0800d4, 0x0800ec},
{0x08010c, 0x080118},
{0x080284, 0x080290},
{0x0802a8, 0x0802b8},
{0x0802dc, 0x08030c},
{0x082000, 0x083fff}
};

static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = {
{
.type = ATH10K_MEM_REGION_TYPE_DRAM,
.start = 0x400000,
.len = 0x68000,
.name = "DRAM",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
.start = 0xC0000,
.len = 0x40000,
.name = "SRAM",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
.start = 0x98000,
.len = 0x50000,
.name = "IRAM",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_IOREG,
.start = 0x30000,
.len = 0x7000,
.name = "APB REG 1",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_IOREG,
.start = 0x3f000,
.len = 0x3000,
.name = "APB REG 2",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_IOREG,
.start = 0x43000,
.len = 0x3000,
.name = "WIFI REG",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_IOREG,
.start = 0x4A000,
.len = 0x5000,
.name = "CE REG",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
.start = 0x080000,
.len = 0x083fff - 0x080000,
.name = "REG_TOTAL",
.section_table = {
.sections = ipq4019_soc_reg_range,
.size = ARRAY_SIZE(ipq4019_soc_reg_range),
},
},
};

static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_0_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -877,6 +973,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_1_1_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -884,6 +981,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_1_3_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -891,6 +989,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_2_1_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw21_mem_regions,
.size = ARRAY_SIZE(qca6174_hw21_mem_regions),
@@ -898,6 +997,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_3_0_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -905,6 +1005,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA6174_HW_3_2_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -912,6 +1013,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA9377_HW_1_1_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9377,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -919,6 +1021,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA988X_HW_2_0_VERSION,
.hw_rev = ATH10K_HW_QCA988X,
.region_table = {
.regions = qca988x_hw20_mem_regions,
.size = ARRAY_SIZE(qca988x_hw20_mem_regions),
@@ -926,6 +1029,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA9984_HW_1_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9984,
.region_table = {
.regions = qca9984_hw10_mem_regions,
.size = ARRAY_SIZE(qca9984_hw10_mem_regions),
@@ -933,6 +1037,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA9888_HW_2_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9888,
.region_table = {
.regions = qca9984_hw10_mem_regions,
.size = ARRAY_SIZE(qca9984_hw10_mem_regions),
@@ -940,12 +1045,20 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
},
{
.hw_id = QCA99X0_HW_2_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA99X0,
.region_table = {
.regions = qca99x0_hw20_mem_regions,
.size = ARRAY_SIZE(qca99x0_hw20_mem_regions),
},
},

{
.hw_id = QCA4019_HW_1_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA4019,
.region_table = {
.regions = qca4019_hw10_mem_regions,
.size = ARRAY_SIZE(qca4019_hw10_mem_regions),
},
},
};

static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
@@ -987,7 +1100,8 @@ const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k
return NULL;

for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
if (ar->target_version == hw_mem_layouts[i].hw_id)
if (ar->target_version == hw_mem_layouts[i].hw_id &&
ar->hw_rev == hw_mem_layouts[i].hw_rev)
return &hw_mem_layouts[i];
}

@@ -165,6 +165,7 @@ struct ath10k_mem_region {
*/
struct ath10k_hw_mem_layout {
u32 hw_id;
u32 hw_rev;

struct {
const struct ath10k_mem_region *regions;

@@ -665,7 +665,7 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
"retry", "ampdu"};
const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
int len = 0, i, j, k, retval = 0;
const int size = 2 * 4096;
const int size = 16 * 4096;
char *buf;

buf = kzalloc(size, GFP_KERNEL);
@@ -719,6 +719,16 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
len += scnprintf(buf + len, size - len, "%llu ",
stats->legacy[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" Rate table %s (1,2 ... Mbps)\n ",
str[j]);
for (i = 0; i < ATH10K_RATE_TABLE_NUM; i++) {
len += scnprintf(buf + len, size - len, "%llu ",
stats->rate_table[j][i]);
if (!((i + 1) % 8))
len +=
scnprintf(buf + len, size - len, "\n ");
}
}
}

@@ -2650,7 +2650,7 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
{
struct rate_info *txrate = &arsta->txrate;
struct ath10k_htt_tx_stats *tx_stats;
int ht_idx, gi, mcs, bw, nss;
int idx, ht_idx, gi, mcs, bw, nss;

if (!arsta->tx_stats)
return;
@@ -2661,6 +2661,8 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
mcs = txrate->mcs;
bw = txrate->bw;
nss = txrate->nss;
idx = mcs * 8 + 8 * 10 * nss;
idx += bw * 2 + gi;

#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]

@@ -2709,12 +2711,16 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).nss[1][nss] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
pstats->succ_pkts + pstats->retry_pkts;
} else {
tx_stats->ack_fails +=
ATH10K_HW_BA_FAIL(pstats->flags);
@@ -2743,6 +2749,15 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;

if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
}
}

static void

@@ -6296,8 +6296,10 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
GFP_KERNEL);
if (!arsta->tx_stats)
if (!arsta->tx_stats) {
ret = -ENOMEM;
goto exit;
}
}

num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
@@ -6385,8 +6387,10 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
"mac vdev %d peer delete %pM sta %pK (sta gone)\n",
arvif->vdev_id, sta->addr, sta);

if (ath10k_debug_is_extd_tx_stats_enabled(ar))
if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
kfree(arsta->tx_stats);
arsta->tx_stats = NULL;
}

if (sta->tdls) {
ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
@@ -8313,7 +8317,6 @@ static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)

static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
acpi_handle root_handle;
acpi_handle handle;
struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
@@ -8321,7 +8324,7 @@ static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
u32 alpha2_code;
char alpha2[3];

root_handle = ACPI_HANDLE(&pdev->dev);
root_handle = ACPI_HANDLE(ar->dev);
if (!root_handle)
return -EOPNOTSUPP;

@@ -543,7 +543,7 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
goto out;

if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
ath10k_err(ar, "capablity req rejected: %d\n", resp->resp.error);
ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error);
ret = -EINVAL;
goto out;
}
@@ -623,7 +623,7 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi)
goto out;
}

ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capablity request completed\n");
ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n");
return 0;

out:
@@ -657,7 +657,7 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi)
wlfw_ind_register_req_msg_v01_ei, &req);
if (ret < 0) {
qmi_txn_cancel(&txn);
ath10k_err(ar, "failed to send indication registed request: %d\n", ret);
ath10k_err(ar, "failed to send indication registered request: %d\n", ret);
goto out;
}

@@ -931,7 +931,7 @@ static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size)
qmi->msa_mem_size = resource_size(&r);
qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size,
MEMREMAP_WT);
if (!qmi->msa_pa) {
if (!qmi->msa_va) {
dev_err(dev, "failed to map memory region: %pa\n", &r.start);
return -EBUSY;
}

|
|
@ -46,14 +46,14 @@ static char *const ce_name[] = {
|
|||
"WLAN_CE_11",
|
||||
};
|
||||
|
||||
static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
|
||||
{NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
|
||||
{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
|
||||
{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
|
||||
{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
|
||||
static struct ath10k_vreg_info vreg_cfg[] = {
|
||||
{NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false},
|
||||
{NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false},
|
||||
{NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false},
|
||||
{NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false},
|
||||
};
|
||||
|
||||
static struct ath10k_wcn3990_clk_info clk_cfg[] = {
|
||||
static struct ath10k_clk_info clk_cfg[] = {
|
||||
{NULL, "cxo_ref_clk_pin", 0, false},
|
||||
};
|
||||
|
||||
|
@ -474,14 +474,14 @@ static struct service_to_pipe target_service_to_ce_map_wlan[] = {
|
|||
},
|
||||
};
|
||||
|
||||
void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
|
||||
static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
|
||||
{
|
||||
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
|
||||
|
||||
iowrite32(value, ar_snoc->mem + offset);
|
||||
}
|
||||
|
||||
u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
|
||||
static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
|
||||
{
|
||||
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
|
||||
u32 val;
|
||||
|
@ -918,7 +918,9 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
|
|||
|
||||
static void ath10k_snoc_hif_stop(struct ath10k *ar)
|
||||
{
|
||||
ath10k_snoc_irq_disable(ar);
|
||||
if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
|
||||
ath10k_snoc_irq_disable(ar);
|
||||
|
||||
napi_synchronize(&ar->napi);
|
||||
napi_disable(&ar->napi);
|
||||
ath10k_snoc_buffer_cleanup(ar);
|
||||
|
@ -927,10 +929,14 @@ static void ath10k_snoc_hif_stop(struct ath10k *ar)
|
|||
|
||||
static int ath10k_snoc_hif_start(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
|
||||
|
||||
napi_enable(&ar->napi);
|
||||
ath10k_snoc_irq_enable(ar);
|
||||
ath10k_snoc_rx_post(ar);
|
||||
|
||||
clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
|
||||
|
||||
return 0;
|
||||
|
@ -994,7 +1000,8 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar)
|
|||
|
||||
static void ath10k_snoc_wlan_disable(struct ath10k *ar)
|
||||
{
|
||||
ath10k_qmi_wlan_disable(ar);
|
||||
if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
|
||||
ath10k_qmi_wlan_disable(ar);
|
||||
}
|
||||
|
||||
static void ath10k_snoc_hif_power_down(struct ath10k *ar)
|
||||
|
@ -1091,6 +1098,11 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
|
|||
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
|
||||
int done = 0;
|
||||
|
||||
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
|
||||
napi_complete(ctx);
|
||||
return done;
|
||||
}
|
||||
|
||||
ath10k_ce_per_engine_service_any(ar);
|
||||
done = ath10k_htt_txrx_compl_task(ar, budget);
|
||||
|
||||
|
@ -1187,17 +1199,29 @@ int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type)
|
|||
struct ath10k_bus_params bus_params;
|
||||
int ret;
|
||||
|
||||
if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
|
||||
return 0;
|
||||
|
||||
switch (type) {
|
||||
case ATH10K_QMI_EVENT_FW_READY_IND:
|
||||
if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) {
|
||||
queue_work(ar->workqueue, &ar->restart_work);
|
||||
break;
|
||||
}
|
||||
|
||||
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
|
||||
bus_params.chip_id = ar_snoc->target_info.soc_version;
|
||||
ret = ath10k_core_register(ar, &bus_params);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to register driver core: %d\n",
|
||||
ath10k_err(ar, "Failed to register driver core: %d\n",
|
||||
ret);
|
||||
return ret;
|
||||
}
|
||||
set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags);
|
||||
break;
|
||||
case ATH10K_QMI_EVENT_FW_DOWN_IND:
|
||||
set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
|
||||
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
|
||||
break;
|
||||
default:
|
||||
ath10k_err(ar, "invalid fw indication: %llx\n", type);
|
||||
|
@ -1246,7 +1270,7 @@ static void ath10k_snoc_release_resource(struct ath10k *ar)
|
|||
}
|
||||
|
||||
static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
|
||||
struct ath10k_wcn3990_vreg_info *vreg_info)
|
||||
struct ath10k_vreg_info *vreg_info)
|
||||
{
|
||||
struct regulator *reg;
|
||||
int ret = 0;
|
||||
|
@ -1284,7 +1308,7 @@ static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
|
|||
}
|
||||
|
||||
static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
|
||||
struct ath10k_wcn3990_clk_info *clk_info)
|
||||
struct ath10k_clk_info *clk_info)
|
||||
{
|
||||
struct clk *handle;
|
||||
int ret = 0;
|
||||
|
@ -1311,10 +1335,80 @@ static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
|
||||
static int __ath10k_snoc_vreg_on(struct ath10k *ar,
|
||||
struct ath10k_vreg_info *vreg_info)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
|
||||
vreg_info->name);
|
||||
|
||||
ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
|
||||
vreg_info->max_v);
|
||||
if (ret) {
|
||||
ath10k_err(ar,
|
||||
"failed to set regulator %s voltage-min: %d voltage-max: %d\n",
|
||||
vreg_info->name, vreg_info->min_v, vreg_info->max_v);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (vreg_info->load_ua) {
|
||||
ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua);
|
||||
if (ret < 0) {
|
||||
ath10k_err(ar, "failed to set regulator %s load: %d\n",
|
||||
vreg_info->name, vreg_info->load_ua);
|
||||
goto err_set_load;
|
||||
}
|
||||
}
|
||||
|
||||
ret = regulator_enable(vreg_info->reg);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to enable regulator %s\n",
|
||||
vreg_info->name);
|
||||
goto err_enable;
|
||||
}
|
||||
|
||||
if (vreg_info->settle_delay)
|
||||
udelay(vreg_info->settle_delay);
|
||||
|
||||
return 0;
|
||||
|
||||
err_enable:
|
||||
regulator_set_load(vreg_info->reg, 0);
|
||||
err_set_load:
|
||||
regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __ath10k_snoc_vreg_off(struct ath10k *ar,
|
||||
struct ath10k_vreg_info *vreg_info)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
|
||||
vreg_info->name);
|
||||
|
||||
ret = regulator_disable(vreg_info->reg);
|
||||
if (ret)
|
||||
ath10k_err(ar, "failed to disable regulator %s\n",
|
||||
vreg_info->name);
|
||||
|
||||
ret = regulator_set_load(vreg_info->reg, 0);
|
||||
if (ret < 0)
|
||||
ath10k_err(ar, "failed to set load %s\n", vreg_info->name);
|
||||
|
||||
ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
|
||||
if (ret)
|
||||
ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_snoc_vreg_on(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
|
||||
struct ath10k_wcn3990_vreg_info *vreg_info;
|
||||
struct ath10k_vreg_info *vreg_info;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
|
@ -1324,62 +1418,30 @@ static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
|
|||
if (!vreg_info->reg)
|
||||
continue;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
|
||||
vreg_info->name);
|
||||
|
||||
ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
|
||||
vreg_info->max_v);
|
||||
if (ret) {
|
||||
ath10k_err(ar,
|
||||
"failed to set regulator %s voltage-min: %d voltage-max: %d\n",
|
||||
vreg_info->name, vreg_info->min_v, vreg_info->max_v);
|
||||
ret = __ath10k_snoc_vreg_on(ar, vreg_info);
|
||||
if (ret)
|
||||
goto err_reg_config;
|
||||
}
|
||||
|
||||
if (vreg_info->load_ua) {
|
||||
ret = regulator_set_load(vreg_info->reg,
|
||||
vreg_info->load_ua);
|
||||
if (ret < 0) {
|
||||
ath10k_err(ar,
|
||||
"failed to set regulator %s load: %d\n",
|
||||
vreg_info->name,
|
||||
vreg_info->load_ua);
|
||||
goto err_reg_config;
|
||||
}
|
||||
}
|
||||
|
||||
ret = regulator_enable(vreg_info->reg);
|
||||
if (ret) {
|
||||
ath10k_err(ar, "failed to enable regulator %s\n",
|
||||
vreg_info->name);
|
||||
goto err_reg_config;
|
||||
}
|
||||
|
||||
if (vreg_info->settle_delay)
|
||||
udelay(vreg_info->settle_delay);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_reg_config:
|
||||
for (; i >= 0; i--) {
|
||||
for (i = i - 1; i >= 0; i--) {
|
||||
vreg_info = &ar_snoc->vreg[i];
|
||||
|
||||
if (!vreg_info->reg)
|
||||
continue;
|
||||
|
||||
regulator_disable(vreg_info->reg);
|
||||
regulator_set_load(vreg_info->reg, 0);
|
||||
regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
|
||||
__ath10k_snoc_vreg_off(ar, vreg_info);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
|
||||
static int ath10k_snoc_vreg_off(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
|
||||
struct ath10k_wcn3990_vreg_info *vreg_info;
|
||||
struct ath10k_vreg_info *vreg_info;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
|
@ -1389,33 +1451,16 @@ static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
|
|||
if (!vreg_info->reg)
|
||||
continue;
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
|
||||
vreg_info->name);
|
||||
|
||||
ret = regulator_disable(vreg_info->reg);
|
||||
if (ret)
|
||||
ath10k_err(ar, "failed to disable regulator %s\n",
|
||||
vreg_info->name);
|
||||
|
||||
ret = regulator_set_load(vreg_info->reg, 0);
|
||||
if (ret < 0)
|
||||
ath10k_err(ar, "failed to set load %s\n",
|
||||
vreg_info->name);
|
||||
|
||||
ret = regulator_set_voltage(vreg_info->reg, 0,
|
||||
vreg_info->max_v);
|
||||
if (ret)
|
||||
ath10k_err(ar, "failed to set voltage %s\n",
|
||||
vreg_info->name);
|
||||
ret = __ath10k_snoc_vreg_off(ar, vreg_info);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_wcn3990_clk_init(struct ath10k *ar)
|
||||
static int ath10k_snoc_clk_init(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
|
||||
struct ath10k_wcn3990_clk_info *clk_info;
|
||||
struct ath10k_clk_info *clk_info;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
|
@ -1449,7 +1494,7 @@ static int ath10k_wcn3990_clk_init(struct ath10k *ar)
|
|||
return 0;
|
||||
|
||||
err_clock_config:
|
||||
for (; i >= 0; i--) {
|
||||
for (i = i - 1; i >= 0; i--) {
|
||||
clk_info = &ar_snoc->clk[i];
|
||||
|
||||
if (!clk_info->handle)
|
||||
|
@ -1461,10 +1506,10 @@ static int ath10k_wcn3990_clk_init(struct ath10k *ar)
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
|
||||
static int ath10k_snoc_clk_deinit(struct ath10k *ar)
|
||||
{
|
||||
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
|
||||
struct ath10k_wcn3990_clk_info *clk_info;
|
||||
struct ath10k_clk_info *clk_info;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
|
||||
|
@ -1488,18 +1533,18 @@ static int ath10k_hw_power_on(struct ath10k *ar)
|
|||
|
||||
ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
|
||||
|
||||
ret = ath10k_wcn3990_vreg_on(ar);
|
||||
ret = ath10k_snoc_vreg_on(ar);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ath10k_wcn3990_clk_init(ar);
|
||||
ret = ath10k_snoc_clk_init(ar);
|
||||
if (ret)
|
||||
goto vreg_off;
|
||||
|
||||
return ret;
|
||||
|
||||
vreg_off:
|
||||
ath10k_wcn3990_vreg_off(ar);
|
||||
ath10k_snoc_vreg_off(ar);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1509,9 +1554,9 @@ static int ath10k_hw_power_off(struct ath10k *ar)
|
|||
|
||||
ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
|
||||
|
||||
ath10k_wcn3990_clk_deinit(ar);
|
||||
ath10k_snoc_clk_deinit(ar);
|
||||
|
||||
ret = ath10k_wcn3990_vreg_off(ar);
|
||||
ret = ath10k_snoc_vreg_off(ar);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -1628,8 +1673,17 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
|
|||
static int ath10k_snoc_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct ath10k *ar = platform_get_drvdata(pdev);
|
||||
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
|
||||
|
||||
reinit_completion(&ar->driver_recovery);
|
||||
|
||||
if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
|
||||
wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ);
|
||||
|
||||
set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags);
|
||||
|
||||
ath10k_core_unregister(ar);
|
||||
ath10k_hw_power_off(ar);
|
||||
ath10k_snoc_free_irq(ar);
|
||||
|
@ -1641,12 +1695,12 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
static struct platform_driver ath10k_snoc_driver = {
|
||||
.probe = ath10k_snoc_probe,
|
||||
.remove = ath10k_snoc_remove,
|
||||
.driver = {
|
||||
.name = "ath10k_snoc",
|
||||
.of_match_table = ath10k_snoc_dt_match,
|
||||
},
|
||||
.probe = ath10k_snoc_probe,
|
||||
.remove = ath10k_snoc_remove,
|
||||
.driver = {
|
||||
.name = "ath10k_snoc",
|
||||
.of_match_table = ath10k_snoc_dt_match,
|
||||
},
|
||||
};
|
||||
module_platform_driver(ath10k_snoc_driver);
|
||||
|
||||
|
|
|
@ -53,7 +53,7 @@ struct ath10k_snoc_ce_irq {
|
|||
u32 irq_line;
|
||||
};
|
||||
|
||||
struct ath10k_wcn3990_vreg_info {
|
||||
struct ath10k_vreg_info {
|
||||
struct regulator *reg;
|
||||
const char *name;
|
||||
u32 min_v;
|
||||
|
@ -63,13 +63,19 @@ struct ath10k_wcn3990_vreg_info {
|
|||
bool required;
|
||||
};
|
||||
|
||||
struct ath10k_wcn3990_clk_info {
|
||||
struct ath10k_clk_info {
|
||||
struct clk *handle;
|
||||
const char *name;
|
||||
u32 freq;
|
||||
bool required;
|
||||
};
|
||||
|
||||
enum ath10k_snoc_flags {
|
||||
ATH10K_SNOC_FLAG_REGISTERED,
|
||||
ATH10K_SNOC_FLAG_UNREGISTERING,
|
||||
ATH10K_SNOC_FLAG_RECOVERY,
|
||||
};
|
||||
|
||||
struct ath10k_snoc {
|
||||
struct platform_device *dev;
|
||||
struct ath10k *ar;
|
||||
|
@ -81,9 +87,10 @@ struct ath10k_snoc {
|
|||
struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
|
||||
struct ath10k_ce ce;
|
||||
struct timer_list rx_post_retry;
|
||||
struct ath10k_wcn3990_vreg_info *vreg;
|
||||
struct ath10k_wcn3990_clk_info *clk;
|
||||
struct ath10k_vreg_info *vreg;
|
||||
struct ath10k_clk_info *clk;
|
||||
struct ath10k_qmi *qmi;
|
||||
unsigned long int flags;
|
||||
};
|
||||
|
||||
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
|
||||
|
@ -91,8 +98,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
|
|||
return (struct ath10k_snoc *)ar->drv_priv;
|
||||
}
|
||||
|
||||
void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
|
||||
u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
|
||||
int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type);
|
||||
|
||||
#endif /* _SNOC_H_ */
|
||||
|
|
|
@@ -762,6 +762,9 @@ static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
arg->noise_floor = ev->noise_floor;
arg->rx_clear_count = ev->rx_clear_count;
arg->cycle_count = ev->cycle_count;
if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
ar->running_fw->fw_file.fw_features))
arg->mac_clk_mhz = ev->mac_clk_mhz;

kfree(tb);
return 0;
@@ -3452,7 +3455,6 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
struct wmi_tlv *tlv;
struct sk_buff *skb;
__le32 *channel_list;
u16 tlv_len;
size_t len;
void *ptr;
u32 i;
@@ -3510,8 +3512,6 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar,
/* nlo_configured_parameters(nlo_list) */
cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count,
WMI_NLO_MAX_SSIDS));
tlv_len = __le32_to_cpu(cmd->no_of_ssids) *
sizeof(struct nlo_configured_parameters);

tlv = ptr;
tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);

@@ -1579,6 +1579,16 @@ struct ath10k_mgmt_tx_pkt_addr {
dma_addr_t paddr;
};

struct chan_info_params {
u32 err_code;
u32 freq;
u32 cmd_flags;
u32 noise_floor;
u32 rx_clear_count;
u32 cycle_count;
u32 mac_clk_mhz;
};

struct wmi_tlv_mgmt_tx_compl_ev {
__le32 desc_id;
__le32 status;

|
|
@ -2554,12 +2554,89 @@ static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle the channel info event for firmware which only sends one
|
||||
* chan_info event per scanned channel.
|
||||
*/
|
||||
static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar,
|
||||
struct chan_info_params *params)
|
||||
{
|
||||
struct survey_info *survey;
|
||||
int idx;
|
||||
|
||||
if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n");
|
||||
return;
|
||||
}
|
||||
|
||||
idx = freq_to_idx(ar, params->freq);
|
||||
if (idx >= ARRAY_SIZE(ar->survey)) {
|
||||
ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
|
||||
params->freq, idx);
|
||||
return;
|
||||
}
|
||||
|
||||
survey = &ar->survey[idx];
|
||||
|
||||
if (!params->mac_clk_mhz || !survey)
|
||||
return;
|
||||
|
||||
memset(survey, 0, sizeof(*survey));
|
||||
|
||||
survey->noise = params->noise_floor;
|
||||
survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000;
|
||||
survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000;
|
||||
survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
|
||||
SURVEY_INFO_TIME_BUSY;
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle the channel info event for firmware which sends chan_info
|
||||
* event in pairs(start and stop events) for every scanned channel.
|
||||
*/
|
||||
static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar,
|
||||
struct chan_info_params *params)
|
||||
{
|
||||
struct survey_info *survey;
|
||||
int idx;
|
||||
|
||||
idx = freq_to_idx(ar, params->freq);
|
||||
if (idx >= ARRAY_SIZE(ar->survey)) {
|
||||
ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
|
||||
params->freq, idx);
|
||||
return;
|
||||
}
|
||||
|
||||
if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
|
||||
if (ar->ch_info_can_report_survey) {
|
||||
survey = &ar->survey[idx];
|
||||
survey->noise = params->noise_floor;
|
||||
survey->filled = SURVEY_INFO_NOISE_DBM;
|
||||
|
||||
ath10k_hw_fill_survey_time(ar,
|
||||
survey,
|
||||
params->cycle_count,
|
||||
params->rx_clear_count,
|
||||
ar->survey_last_cycle_count,
|
||||
ar->survey_last_rx_clear_count);
|
||||
}
|
||||
|
||||
ar->ch_info_can_report_survey = false;
|
||||
} else {
|
||||
ar->ch_info_can_report_survey = true;
|
||||
}
|
||||
|
||||
if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
|
||||
ar->survey_last_rx_clear_count = params->rx_clear_count;
|
||||
ar->survey_last_cycle_count = params->cycle_count;
|
||||
}
|
||||
}
|
||||
|
||||
void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
|
||||
{
|
||||
struct chan_info_params ch_info_param;
|
||||
struct wmi_ch_info_ev_arg arg = {};
|
||||
struct survey_info *survey;
|
||||
u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
|
||||
int idx, ret;
|
||||
int ret;
|
||||
|
||||
ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
|
||||
if (ret) {
|
||||
|
@ -2567,17 +2644,19 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
|
|||
return;
|
||||
}
|
||||
|
||||
err_code = __le32_to_cpu(arg.err_code);
|
||||
freq = __le32_to_cpu(arg.freq);
|
||||
cmd_flags = __le32_to_cpu(arg.cmd_flags);
|
||||
noise_floor = __le32_to_cpu(arg.noise_floor);
|
||||
rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
|
||||
cycle_count = __le32_to_cpu(arg.cycle_count);
|
||||
ch_info_param.err_code = __le32_to_cpu(arg.err_code);
|
||||
ch_info_param.freq = __le32_to_cpu(arg.freq);
|
||||
ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags);
|
||||
ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor);
|
||||
ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
|
||||
ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count);
|
||||
ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz);
|
||||
|
||||
ath10k_dbg(ar, ATH10K_DBG_WMI,
|
||||
"chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
|
||||
err_code, freq, cmd_flags, noise_floor, rx_clear_count,
|
||||
cycle_count);
|
||||
ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags,
|
||||
ch_info_param.noise_floor, ch_info_param.rx_clear_count,
|
||||
ch_info_param.cycle_count);
|
||||
|
||||
spin_lock_bh(&ar->data_lock);
|
||||
|
||||
|
@ -2591,36 +2670,11 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
|
|||
break;
|
||||
}
|
||||
|
||||
idx = freq_to_idx(ar, freq);
|
||||
if (idx >= ARRAY_SIZE(ar->survey)) {
|
||||
ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
|
||||
freq, idx);
|
||||
goto exit;
|
||||
}
|
||||
|
||||
if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
|
||||
if (ar->ch_info_can_report_survey) {
|
||||
survey = &ar->survey[idx];
|
||||
survey->noise = noise_floor;
|
||||
survey->filled = SURVEY_INFO_NOISE_DBM;
|
||||
|
||||
ath10k_hw_fill_survey_time(ar,
|
||||
survey,
|
||||
cycle_count,
|
||||
rx_clear_count,
|
||||
ar->survey_last_cycle_count,
|
||||
ar->survey_last_rx_clear_count);
|
||||
}
|
||||
|
||||
ar->ch_info_can_report_survey = false;
|
||||
} else {
|
||||
ar->ch_info_can_report_survey = true;
|
||||
}
|
||||
|
||||
if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
|
||||
ar->survey_last_rx_clear_count = rx_clear_count;
|
||||
ar->survey_last_cycle_count = cycle_count;
|
||||
}
|
||||
if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL,
|
||||
ar->running_fw->fw_file.fw_features))
|
||||
ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param);
|
||||
else
|
||||
ath10k_wmi_event_chan_info_paired(ar, &ch_info_param);
|
||||
|
||||
exit:
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
|
|
|
@@ -4991,6 +4991,7 @@ enum wmi_rate_preamble {
#define ATH10K_LEGACY_NUM 12
#define ATH10K_GI_NUM 2
#define ATH10K_HT_MCS_NUM 32
#define ATH10K_RATE_TABLE_NUM 320

/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
@@ -6441,6 +6442,14 @@ struct wmi_chan_info_event {
__le32 noise_floor;
__le32 rx_clear_count;
__le32 cycle_count;
__le32 chan_tx_pwr_range;
__le32 chan_tx_pwr_tp;
__le32 rx_frame_count;
__le32 my_bss_rx_cycle_count;
__le32 rx_11b_mode_data_duration;
__le32 tx_frame_cnt;
__le32 mac_clk_mhz;

} __packed;

struct wmi_10_4_chan_info_event {
@@ -6669,6 +6678,10 @@ struct wmi_ch_info_ev_arg {
__le32 chan_tx_pwr_range;
__le32 chan_tx_pwr_tp;
__le32 rx_frame_count;
__le32 my_bss_rx_cycle_count;
__le32 rx_11b_mode_data_duration;
__le32 tx_frame_cnt;
__le32 mac_clk_mhz;
};

/* From 10.4 firmware, not sure all have the same values. */

@@ -135,7 +135,7 @@ static void ath10k_wow_convert_8023_to_80211
&old_hdr_mask->h_proto,
sizeof(old_hdr_mask->h_proto));

/* Caculate new pkt_offset */
/* Calculate new pkt_offset */
if (old->pkt_offset < ETH_ALEN)
new->pkt_offset = old->pkt_offset +
offsetof(struct ieee80211_hdr_3addr, addr1);
@@ -146,7 +146,7 @@ static void ath10k_wow_convert_8023_to_80211
else
new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;

/* Caculate new hdr end offset */
/* Calculate new hdr end offset */
if (total_len > ETH_HLEN)
hdr_80211_end_offset = hdr_len + rfc_len;
else if (total_len > offsetof(struct ethhdr, h_proto))

@@ -389,6 +389,7 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
if (!ik->valid || ik->key_type != WAPI_CRYPT)
break;
/* for WAPI, we need to set the delayed group key, continue: */
/* fall through */
case WPA_PSK_AUTH:
case WPA2_PSK_AUTH:
case (WPA_PSK_AUTH | WPA2_PSK_AUTH):

@@ -116,7 +116,7 @@ config ATH9K_DFS_CERTIFIED
except increase code size.

config ATH9K_DYNACK
bool "Atheros ath9k ACK timeout estimation algorithm (EXPERIMENTAL)"
bool "Atheros ath9k ACK timeout estimation algorithm"
depends on ATH9K
default n
---help---

@@ -586,7 +586,7 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
/* else: fall through */
/* fall through */
case 0x1:
case 0x2:
case 0x7:

@@ -119,7 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
aModeRefSel = 2;
if (aModeRefSel)
break;
/* else: fall through */
/* fall through */
case 1:
default:
aModeRefSel = 0;

@@ -1055,17 +1055,15 @@ void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 new_flags, to_set, to_clear;
u32 to_set, to_clear;

if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP))
return;

if (mci->is_2g) {
new_flags = MCI_2G_FLAGS;
to_clear = MCI_2G_FLAGS_CLEAR_MASK;
to_set = MCI_2G_FLAGS_SET_MASK;
} else {
new_flags = MCI_5G_FLAGS;
to_clear = MCI_5G_FLAGS_CLEAR_MASK;
to_set = MCI_5G_FLAGS_SET_MASK;
}

@@ -272,7 +272,7 @@ struct ath_node {
#endif
u8 key_idx[4];

u32 ackto;
int ackto;
struct list_head list;
};

@ -29,9 +29,13 @@
|
|||
* ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
|
||||
*
|
||||
*/
|
||||
static inline u32 ath_dynack_ewma(u32 old, u32 new)
|
||||
static inline int ath_dynack_ewma(int old, int new)
|
||||
{
|
||||
return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
|
||||
if (old > 0)
|
||||
return (new * (EWMA_DIV - EWMA_LEVEL) +
|
||||
old * EWMA_LEVEL) / EWMA_DIV;
|
||||
else
|
||||
return new;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -82,10 +86,10 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
|
|||
*/
|
||||
static void ath_dynack_compute_ackto(struct ath_hw *ah)
|
||||
{
|
||||
struct ath_node *an;
|
||||
u32 to = 0;
|
||||
struct ath_dynack *da = &ah->dynack;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ath_dynack *da = &ah->dynack;
|
||||
struct ath_node *an;
|
||||
int to = 0;
|
||||
|
||||
list_for_each_entry(an, &da->nodes, list)
|
||||
if (an->ackto > to)
|
||||
|
@ -144,7 +148,8 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
|
|||
an->ackto = ath_dynack_ewma(an->ackto,
|
||||
ackto);
|
||||
ath_dbg(ath9k_hw_common(ah), DYNACK,
|
||||
"%pM to %u\n", dst, an->ackto);
|
||||
"%pM to %d [%u]\n", dst,
|
||||
an->ackto, ackto);
|
||||
if (time_is_before_jiffies(da->lto)) {
|
||||
ath_dynack_compute_ackto(ah);
|
||||
da->lto = jiffies + COMPUTE_TO;
|
||||
|
@ -166,18 +171,21 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
|
|||
* @ah: ath hw
|
||||
* @skb: socket buffer
|
||||
* @ts: tx status info
|
||||
* @sta: station pointer
|
||||
*
|
||||
*/
|
||||
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
|
||||
struct ath_tx_status *ts)
|
||||
struct ath_tx_status *ts,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
u8 ridx;
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct ath_dynack *da = &ah->dynack;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
|
||||
u32 dur = ts->duration;
|
||||
u8 ridx;
|
||||
|
||||
if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled)
|
||||
if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
|
||||
return;
|
||||
|
||||
spin_lock_bh(&da->qlock);
|
||||
|
@ -187,11 +195,19 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|||
/* late ACK */
|
||||
if (ts->ts_status & ATH9K_TXERR_XRETRY) {
|
||||
if (ieee80211_is_assoc_req(hdr->frame_control) ||
|
||||
ieee80211_is_assoc_resp(hdr->frame_control)) {
|
||||
ieee80211_is_assoc_resp(hdr->frame_control) ||
|
||||
ieee80211_is_auth(hdr->frame_control)) {
|
||||
ath_dbg(common, DYNACK, "late ack\n");
|
||||
|
||||
ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2);
|
||||
ath9k_hw_set_ack_timeout(ah, LATEACK_TO);
|
||||
ath9k_hw_set_cts_timeout(ah, LATEACK_TO);
|
||||
if (sta) {
|
||||
struct ath_node *an;
|
||||
|
||||
an = (struct ath_node *)sta->drv_priv;
|
||||
an->ackto = -1;
|
||||
}
|
||||
da->lto = jiffies + LATEACK_DELAY;
|
||||
}
|
||||
|
||||
|
@ -202,14 +218,13 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|||
ridx = ts->ts_rateindex;
|
||||
|
||||
da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp;
|
||||
da->st_rbf.ts[da->st_rbf.t_rb].dur = ts->duration;
|
||||
ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1);
|
||||
ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2);
|
||||
|
||||
if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) {
|
||||
u32 phy, sifs;
|
||||
const struct ieee80211_rate *rate;
|
||||
struct ieee80211_tx_rate *rates = info->status.rates;
|
||||
u32 phy;
|
||||
|
||||
rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
|
||||
if (info->band == NL80211_BAND_2GHZ &&
|
||||
|
@ -218,19 +233,18 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|||
else
|
||||
phy = WLAN_RC_PHY_OFDM;
|
||||
|
||||
sifs = ath_dynack_get_sifs(ah, phy);
|
||||
da->st_rbf.ts[da->st_rbf.t_rb].dur -= sifs;
|
||||
dur -= ath_dynack_get_sifs(ah, phy);
|
||||
}
|
||||
|
||||
ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
|
||||
hdr->addr1, da->st_rbf.ts[da->st_rbf.t_rb].tstamp,
|
||||
da->st_rbf.ts[da->st_rbf.t_rb].dur, da->st_rbf.h_rb,
|
||||
(da->st_rbf.t_rb + 1) % ATH_DYN_BUF);
|
||||
da->st_rbf.ts[da->st_rbf.t_rb].dur = dur;
|
||||
|
||||
INCR(da->st_rbf.t_rb, ATH_DYN_BUF);
|
||||
if (da->st_rbf.t_rb == da->st_rbf.h_rb)
|
||||
INCR(da->st_rbf.h_rb, ATH_DYN_BUF);
|
||||
|
||||
ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n",
|
||||
hdr->addr1, ts->ts_tstamp, dur, da->st_rbf.h_rb,
|
||||
da->st_rbf.t_rb);
|
||||
|
||||
ath_dynack_compute_to(ah);
|
||||
|
||||
spin_unlock_bh(&da->qlock);
|
||||
|
@ -251,20 +265,19 @@ void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
|
|||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
|
||||
if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled)
|
||||
if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
|
||||
return;
|
||||
|
||||
spin_lock_bh(&da->qlock);
|
||||
da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts;
|
||||
|
||||
ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
|
||||
da->ack_rbf.tstamp[da->ack_rbf.t_rb],
|
||||
da->ack_rbf.h_rb, (da->ack_rbf.t_rb + 1) % ATH_DYN_BUF);
|
||||
|
||||
INCR(da->ack_rbf.t_rb, ATH_DYN_BUF);
|
||||
if (da->ack_rbf.t_rb == da->ack_rbf.h_rb)
|
||||
INCR(da->ack_rbf.h_rb, ATH_DYN_BUF);
|
||||
|
||||
ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n",
|
||||
ts, da->ack_rbf.h_rb, da->ack_rbf.t_rb);
|
||||
|
||||
ath_dynack_compute_to(ah);
|
||||
|
||||
spin_unlock_bh(&da->qlock);
|
||||
|
|
|
@@ -86,7 +86,8 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an);
void ath_dynack_init(struct ath_hw *ah);
void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts);
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
struct ath_tx_status *ts);
struct ath_tx_status *ts,
struct ieee80211_sta *sta);
#else
static inline void ath_dynack_init(struct ath_hw *ah) {}
static inline void ath_dynack_node_init(struct ath_hw *ah,
@@ -97,7 +98,8 @@ static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah,
struct sk_buff *skb, u32 ts) {}
static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah,
struct sk_buff *skb,
struct ath_tx_status *ts) {}
struct ath_tx_status *ts,
struct ieee80211_sta *sta) {}
#endif

#endif /* DYNACK_H */

@@ -2279,6 +2279,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
case NL80211_IFTYPE_ADHOC:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
/* fall through */
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);

@@ -629,7 +629,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (bf == bf->bf_lastbf)
ath_dynack_sample_tx_ts(sc->sc_ah,
bf->bf_mpdu,
ts);
ts, sta);
}

ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
@@ -773,7 +773,8 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
memcpy(info->control.rates, bf->rates,
sizeof(info->control.rates));
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts,
sta);
}
ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
} else

@@ -766,6 +766,7 @@ static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)

goto drop;
}
/* fall through */

case AR9170_RX_STATUS_MPDU_MIDDLE:
/* These are just data + mac status */

@@ -830,10 +830,12 @@ static bool carl9170_tx_rts_check(struct ar9170 *ar,
case CARL9170_ERP_AUTO:
if (ampdu)
break;
/* fall through */

case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
break;
/* fall through */

case CARL9170_ERP_RTS:
if (likely(!multi))
@@ -854,6 +856,7 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
break;
/* fall through */

case CARL9170_ERP_CTS:
return true;

|
|
|
@ -51,6 +51,19 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
|
|||
CHAN60G(4, 0),
|
||||
};
|
||||
|
||||
static void
|
||||
wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len)
|
||||
{
|
||||
kfree(*pdst);
|
||||
*pdst = NULL;
|
||||
*pdst_len = 0;
|
||||
if (src_len > 0) {
|
||||
*pdst = kmemdup(src, src_len, GFP_KERNEL);
|
||||
if (*pdst)
|
||||
*pdst_len = src_len;
|
||||
}
|
||||
}
|
||||
|
||||
static int wil_num_supported_channels(struct wil6210_priv *wil)
|
||||
{
|
||||
int num_channels = ARRAY_SIZE(wil_60ghz_channels);
|
||||
|
@ -1441,11 +1454,19 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
|
|||
|
||||
rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len,
|
||||
params->key, key_usage);
|
||||
if (!rc && !IS_ERR(cs))
|
||||
if (!rc && !IS_ERR(cs)) {
|
||||
/* update local storage used for AP recovery */
|
||||
if (key_usage == WMI_KEY_USE_TX_GROUP && params->key &&
|
||||
params->key_len <= WMI_MAX_KEY_LEN) {
|
||||
vif->gtk_index = key_index;
|
||||
memcpy(vif->gtk, params->key, params->key_len);
|
||||
vif->gtk_len = params->key_len;
|
||||
}
|
||||
/* in FT set crypto will take place upon receiving
|
||||
* WMI_RING_EN_EVENTID event
|
||||
*/
|
||||
wil_set_crypto_rx(key_index, key_usage, cs, params);
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
@ -1634,6 +1655,14 @@ static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
|
|||
u16 len = 0, proberesp_len = 0;
|
||||
u8 *ies = NULL, *proberesp;
|
||||
|
||||
/* update local storage used for AP recovery */
|
||||
wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, bcon->probe_resp,
|
||||
bcon->probe_resp_len);
|
||||
wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len,
|
||||
bcon->proberesp_ies, bcon->proberesp_ies_len);
|
||||
wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len,
|
||||
bcon->assocresp_ies, bcon->assocresp_ies_len);
|
||||
|
||||
proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp,
|
||||
bcon->probe_resp_len,
|
||||
&proberesp_len);
|
||||
|
@ -1735,6 +1764,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
|
|||
vif->channel = chan;
|
||||
vif->hidden_ssid = hidden_ssid;
|
||||
vif->pbss = pbss;
|
||||
vif->bi = bi;
|
||||
memcpy(vif->ssid, ssid, ssid_len);
|
||||
vif->ssid_len = ssid_len;
|
||||
|
||||
netif_carrier_on(ndev);
|
||||
if (!wil_has_other_active_ifaces(wil, ndev, false, true))
|
||||
|
@ -1761,11 +1793,64 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
|
|||
return rc;
|
||||
}
|
||||
|
||||
void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
|
||||
{
|
||||
int rc, i;
|
||||
struct wiphy *wiphy = wil_to_wiphy(wil);
|
||||
|
||||
for (i = 0; i < wil->max_vifs; i++) {
|
||||
struct wil6210_vif *vif = wil->vifs[i];
|
||||
struct net_device *ndev;
|
||||
struct cfg80211_beacon_data bcon = {};
|
||||
struct key_params key_params = {};
|
||||
|
||||
if (!vif || vif->ssid_len == 0)
|
||||
continue;
|
||||
|
||||
ndev = vif_to_ndev(vif);
|
||||
bcon.proberesp_ies = vif->proberesp_ies;
|
||||
bcon.assocresp_ies = vif->assocresp_ies;
|
||||
bcon.probe_resp = vif->proberesp;
|
||||
bcon.proberesp_ies_len = vif->proberesp_ies_len;
|
||||
bcon.assocresp_ies_len = vif->assocresp_ies_len;
|
||||
bcon.probe_resp_len = vif->proberesp_len;
|
||||
|
||||
wil_info(wil,
|
||||
"AP (vif %d) recovery: privacy %d, bi %d, channel %d, hidden %d, pbss %d\n",
|
||||
i, vif->privacy, vif->bi, vif->channel,
|
||||
vif->hidden_ssid, vif->pbss);
|
||||
wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
|
||||
vif->ssid, vif->ssid_len, true);
|
||||
rc = _wil_cfg80211_start_ap(wiphy, ndev,
|
||||
vif->ssid, vif->ssid_len,
|
||||
vif->privacy, vif->bi,
|
||||
vif->channel, &bcon,
|
||||
vif->hidden_ssid, vif->pbss);
|
||||
if (rc) {
|
||||
wil_err(wil, "vif %d recovery failed (%d)\n", i, rc);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!vif->privacy || vif->gtk_len == 0)
|
||||
continue;
|
||||
|
||||
key_params.key = vif->gtk;
|
||||
key_params.key_len = vif->gtk_len;
|
||||
key_params.seq_len = IEEE80211_GCMP_PN_LEN;
|
||||
rc = wil_cfg80211_add_key(wiphy, ndev, vif->gtk_index, false,
|
||||
NULL, &key_params);
|
||||
if (rc)
|
||||
wil_err(wil, "vif %d recovery add key failed (%d)\n",
|
||||
i, rc);
|
||||
}
|
||||
}
|
||||
|
||||
static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
|
||||
struct net_device *ndev,
|
||||
struct cfg80211_beacon_data *bcon)
|
||||
{
|
||||
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
|
||||
struct wireless_dev *wdev = ndev->ieee80211_ptr;
|
||||
struct wil6210_vif *vif = ndev_to_vif(ndev);
|
||||
int rc;
|
||||
u32 privacy = 0;
|
||||
|
@ -1778,15 +1863,16 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
|
|||
bcon->tail_len))
|
||||
privacy = 1;
|
||||
|
||||
memcpy(vif->ssid, wdev->ssid, wdev->ssid_len);
|
||||
vif->ssid_len = wdev->ssid_len;
|
||||
|
||||
/* in case privacy has changed, need to restart the AP */
|
||||
if (vif->privacy != privacy) {
|
||||
struct wireless_dev *wdev = ndev->ieee80211_ptr;
|
||||
|
||||
wil_dbg_misc(wil, "privacy changed %d=>%d. Restarting AP\n",
|
||||
vif->privacy, privacy);
|
||||
|
||||
rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid,
|
||||
wdev->ssid_len, privacy,
|
||||
rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
|
||||
vif->ssid_len, privacy,
|
||||
wdev->beacon_interval,
|
||||
vif->channel, bcon,
|
||||
vif->hidden_ssid,
|
||||
|
@ -1876,6 +1962,12 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
|
|||
|
||||
wmi_pcp_stop(vif);
|
||||
clear_bit(wil_vif_ft_roam, vif->status);
|
||||
vif->ssid_len = 0;
|
||||
wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, NULL, 0);
|
||||
wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len, NULL, 0);
|
||||
wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len, NULL, 0);
|
||||
memset(vif->gtk, 0, WMI_MAX_KEY_LEN);
|
||||
vif->gtk_len = 0;
|
||||
|
||||
if (last)
|
||||
__wil_down(wil);
|
||||
|
@ -1923,7 +2015,7 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
|
|||
params->mac, params->reason_code, vif->mid);
|
||||
|
||||
mutex_lock(&wil->mutex);
|
||||
wil6210_disconnect(vif, params->mac, params->reason_code, false);
|
||||
wil6210_disconnect(vif, params->mac, params->reason_code);
|
||||
mutex_unlock(&wil->mutex);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -664,10 +664,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
|
|||
enum { max_count = 4096 };
|
||||
struct wil_blob_wrapper *wil_blob = file->private_data;
|
||||
struct wil6210_priv *wil = wil_blob->wil;
|
||||
loff_t pos = *ppos;
|
||||
loff_t aligned_pos, pos = *ppos;
|
||||
size_t available = wil_blob->blob.size;
|
||||
void *buf;
|
||||
size_t ret;
|
||||
size_t unaligned_bytes, aligned_count, ret;
|
||||
int rc;
|
||||
|
||||
if (test_bit(wil_status_suspending, wil_blob->wil->status) ||
|
||||
|
@ -685,7 +685,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
|
|||
if (count > max_count)
|
||||
count = max_count;
|
||||
|
||||
buf = kmalloc(count, GFP_KERNEL);
|
||||
/* set pos to 4 bytes aligned */
|
||||
unaligned_bytes = pos % 4;
|
||||
aligned_pos = pos - unaligned_bytes;
|
||||
aligned_count = count + unaligned_bytes;
|
||||
|
||||
buf = kmalloc(aligned_count, GFP_KERNEL);
|
||||
if (!buf)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@ -696,9 +701,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
|
|||
}
|
||||
|
||||
wil_memcpy_fromio_32(buf, (const void __iomem *)
|
||||
wil_blob->blob.data + pos, count);
|
||||
wil_blob->blob.data + aligned_pos, aligned_count);
|
||||
|
||||
ret = copy_to_user(user_buf, buf, count);
|
||||
ret = copy_to_user(user_buf, buf + unaligned_bytes, count);
|
||||
|
||||
wil_pm_runtime_put(wil);
|
||||
|
||||
|
@ -962,6 +967,8 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
|
|||
int rc;
|
||||
void *frame;
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
|
||||
if (!len)
|
||||
return -EINVAL;
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <linux/moduleparam.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
|
||||
#include "wil6210.h"
|
||||
#include "txrx.h"
|
||||
|
@ -80,7 +81,7 @@ static const struct kernel_param_ops mtu_max_ops = {
|
|||
module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
|
||||
MODULE_PARM_DESC(mtu_max, " Max MTU value.");
|
||||
|
||||
static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
|
||||
static uint rx_ring_order;
|
||||
static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT;
|
||||
static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT;
|
||||
|
||||
|
@ -214,8 +215,21 @@ static void wil_ring_fini_tx(struct wil6210_priv *wil, int id)
|
|||
wil->txrx_ops.ring_fini_tx(wil, ring);
|
||||
}
|
||||
|
||||
static void wil_disconnect_cid(struct wil6210_vif *vif, int cid,
|
||||
u16 reason_code, bool from_event)
|
||||
static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < WIL6210_MAX_CID; i++) {
|
||||
if (wil->sta[i].mid == mid &&
|
||||
wil->sta[i].status == wil_sta_connected)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void wil_disconnect_cid_complete(struct wil6210_vif *vif, int cid,
|
||||
u16 reason_code)
|
||||
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
|
||||
{
|
||||
uint i;
|
||||
|
@ -226,24 +240,14 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
|
|||
int min_ring_id = wil_get_min_tx_ring_id(wil);
|
||||
|
||||
might_sleep();
|
||||
wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n",
|
||||
wil_dbg_misc(wil,
|
||||
"disconnect_cid_complete: CID %d, MID %d, status %d\n",
|
||||
cid, sta->mid, sta->status);
|
||||
/* inform upper/lower layers */
|
||||
/* inform upper layers */
|
||||
if (sta->status != wil_sta_unused) {
|
||||
if (vif->mid != sta->mid) {
|
||||
wil_err(wil, "STA MID mismatch with VIF MID(%d)\n",
|
||||
vif->mid);
|
||||
/* let FW override sta->mid but be more strict with
|
||||
* user space requests
|
||||
*/
|
||||
if (!from_event)
|
||||
return;
|
||||
}
|
||||
if (!from_event) {
|
||||
bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ?
|
||||
disable_ap_sme : false;
|
||||
wmi_disconnect_sta(vif, sta->addr, reason_code,
|
||||
true, del_sta);
|
||||
}
|
||||
|
||||
switch (wdev->iftype) {
|
||||
|
@ -283,36 +287,20 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
|
|||
sta->stats.tx_latency_min_us = U32_MAX;
|
||||
}
|
||||
|
||||
static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
|
||||
if (wil->sta[i].mid == mid &&
|
||||
wil->sta[i].status == wil_sta_connected)
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
|
||||
u16 reason_code, bool from_event)
|
||||
static void _wil6210_disconnect_complete(struct wil6210_vif *vif,
|
||||
const u8 *bssid, u16 reason_code)
|
||||
{
|
||||
struct wil6210_priv *wil = vif_to_wil(vif);
|
||||
int cid = -ENOENT;
|
||||
struct net_device *ndev;
|
||||
struct wireless_dev *wdev;
|
||||
|
||||
if (unlikely(!vif))
|
||||
return;
|
||||
|
||||
ndev = vif_to_ndev(vif);
|
||||
wdev = vif_to_wdev(vif);
|
||||
|
||||
might_sleep();
|
||||
wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid,
|
||||
reason_code, from_event ? "+" : "-");
|
||||
wil_info(wil, "disconnect_complete: bssid=%pM, reason=%d\n",
|
||||
bssid, reason_code);
|
||||
|
||||
/* Cases are:
|
||||
* - disconnect single STA, still connected
|
||||
|
@ -327,14 +315,15 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
|
|||
if (bssid && !is_broadcast_ether_addr(bssid) &&
|
||||
!ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
|
||||
cid = wil_find_cid(wil, vif->mid, bssid);
|
||||
wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
|
||||
wil_dbg_misc(wil,
|
||||
"Disconnect complete %pM, CID=%d, reason=%d\n",
|
||||
bssid, cid, reason_code);
|
||||
if (cid >= 0) /* disconnect 1 peer */
|
||||
wil_disconnect_cid(vif, cid, reason_code, from_event);
|
||||
wil_disconnect_cid_complete(vif, cid, reason_code);
|
||||
} else { /* all */
|
||||
wil_dbg_misc(wil, "Disconnect all\n");
|
||||
wil_dbg_misc(wil, "Disconnect complete all\n");
|
||||
for (cid = 0; cid < WIL6210_MAX_CID; cid++)
|
||||
wil_disconnect_cid(vif, cid, reason_code, from_event);
|
||||
wil_disconnect_cid_complete(vif, cid, reason_code);
|
||||
}
|
||||
|
||||
/* link state */
|
||||
|
@ -380,6 +369,84 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
|
|||
}
|
||||
}
|
||||
|
||||
static int wil_disconnect_cid(struct wil6210_vif *vif, int cid,
|
||||
u16 reason_code)
|
||||
{
|
||||
struct wil6210_priv *wil = vif_to_wil(vif);
|
||||
struct wireless_dev *wdev = vif_to_wdev(vif);
|
||||
struct wil_sta_info *sta = &wil->sta[cid];
|
||||
bool del_sta = false;
|
||||
|
||||
might_sleep();
|
||||
wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n",
|
||||
cid, sta->mid, sta->status);
|
||||
|
||||
if (sta->status == wil_sta_unused)
|
||||
return 0;
|
||||
|
||||
if (vif->mid != sta->mid) {
|
||||
wil_err(wil, "STA MID mismatch with VIF MID(%d)\n", vif->mid);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* inform lower layers */
|
||||
if (wdev->iftype == NL80211_IFTYPE_AP && disable_ap_sme)
|
||||
del_sta = true;
|
||||
|
||||
/* disconnect by sending command disconnect/del_sta and wait
|
||||
* synchronously for WMI_DISCONNECT_EVENTID event.
|
||||
*/
|
||||
return wmi_disconnect_sta(vif, sta->addr, reason_code, del_sta);
|
||||
}
|
||||
|
||||
static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
|
||||
u16 reason_code)
|
||||
{
|
||||
struct wil6210_priv *wil;
|
||||
struct net_device *ndev;
|
||||
struct wireless_dev *wdev;
|
||||
int cid = -ENOENT;
|
||||
|
||||
if (unlikely(!vif))
|
||||
return;
|
||||
|
||||
wil = vif_to_wil(vif);
|
||||
ndev = vif_to_ndev(vif);
|
||||
wdev = vif_to_wdev(vif);
|
||||
|
||||
might_sleep();
|
||||
wil_info(wil, "disconnect bssid=%pM, reason=%d\n", bssid, reason_code);
|
||||
|
||||
/* Cases are:
|
||||
* - disconnect single STA, still connected
|
||||
* - disconnect single STA, already disconnected
|
||||
* - disconnect all
|
||||
*
|
||||
* For "disconnect all", there are 3 options:
|
||||
* - bssid == NULL
|
||||
* - bssid is broadcast address (ff:ff:ff:ff:ff:ff)
|
||||
* - bssid is our MAC address
|
||||
*/
|
||||
if (bssid && !is_broadcast_ether_addr(bssid) &&
|
||||
!ether_addr_equal_unaligned(ndev->dev_addr, bssid)) {
|
||||
cid = wil_find_cid(wil, vif->mid, bssid);
|
||||
wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n",
|
||||
bssid, cid, reason_code);
|
||||
if (cid >= 0) /* disconnect 1 peer */
|
||||
wil_disconnect_cid(vif, cid, reason_code);
|
||||
} else { /* all */
|
||||
wil_dbg_misc(wil, "Disconnect all\n");
|
||||
for (cid = 0; cid < WIL6210_MAX_CID; cid++)
|
||||
wil_disconnect_cid(vif, cid, reason_code);
|
||||
}
|
||||
|
||||
/* call event handler manually after processing wmi_call,
|
||||
* to avoid deadlock - disconnect event handler acquires
|
||||
* wil->mutex while it is already held here
|
||||
*/
|
||||
_wil6210_disconnect_complete(vif, bssid, reason_code);
|
||||
}
|
||||
|
||||
void wil_disconnect_worker(struct work_struct *work)
|
||||
{
|
||||
struct wil6210_vif *vif = container_of(work,
|
||||
|
@ -485,10 +552,11 @@ static void wil_fw_error_worker(struct work_struct *work)
|
|||
if (wil_wait_for_recovery(wil) != 0)
|
||||
return;
|
||||
|
||||
rtnl_lock();
|
||||
mutex_lock(&wil->mutex);
|
||||
/* Needs adaptation for multiple VIFs
|
||||
* need to go over all VIFs and consider the appropriate
|
||||
* recovery.
|
||||
* recovery because each one can have different iftype.
|
||||
*/
|
||||
switch (wdev->iftype) {
|
||||
case NL80211_IFTYPE_STATION:
|
||||
|
@ -500,15 +568,24 @@ static void wil_fw_error_worker(struct work_struct *work)
|
|||
break;
|
||||
case NL80211_IFTYPE_AP:
|
||||
case NL80211_IFTYPE_P2P_GO:
|
||||
wil_info(wil, "No recovery for AP-like interface\n");
|
||||
/* recovery in these modes is done by upper layers */
|
||||
if (no_fw_recovery) /* upper layers do recovery */
|
||||
break;
|
||||
/* silent recovery, upper layers will see disconnect */
|
||||
__wil_down(wil);
|
||||
__wil_up(wil);
|
||||
mutex_unlock(&wil->mutex);
|
||||
wil_cfg80211_ap_recovery(wil);
|
||||
mutex_lock(&wil->mutex);
|
||||
wil_info(wil, "... completed\n");
|
||||
break;
|
||||
default:
|
||||
wil_err(wil, "No recovery - unknown interface type %d\n",
|
||||
wdev->iftype);
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&wil->mutex);
|
||||
rtnl_unlock();
|
||||
}
|
||||
|
||||
static int wil_find_free_ring(struct wil6210_priv *wil)
|
||||
|
@ -694,20 +771,41 @@ void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
|
|||
* @vif: virtual interface context
|
||||
* @bssid: peer to disconnect, NULL to disconnect all
|
||||
* @reason_code: Reason code for the Disassociation frame
|
||||
* @from_event: whether is invoked from FW event handler
|
||||
*
|
||||
* Disconnect and release associated resources. If invoked not from the
|
||||
* FW event handler, issue WMI command(s) to trigger MAC disconnect.
|
||||
* Disconnect and release associated resources. Issue WMI
|
||||
* command(s) to trigger MAC disconnect. When command was issued
|
||||
* successfully, call the wil6210_disconnect_complete function
|
||||
* to handle the event synchronously
|
||||
*/
|
||||
void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
|
||||
u16 reason_code, bool from_event)
|
||||
u16 reason_code)
|
||||
{
|
||||
struct wil6210_priv *wil = vif_to_wil(vif);
|
||||
|
||||
wil_dbg_misc(wil, "disconnect\n");
|
||||
wil_dbg_misc(wil, "disconnecting\n");
|
||||
|
||||
del_timer_sync(&vif->connect_timer);
|
||||
_wil6210_disconnect(vif, bssid, reason_code, from_event);
|
||||
_wil6210_disconnect(vif, bssid, reason_code);
|
||||
}
|
||||
|
||||
/**
|
||||
* wil6210_disconnect_complete - handle disconnect event
|
||||
* @vif: virtual interface context
|
||||
* @bssid: peer to disconnect, NULL to disconnect all
|
||||
* @reason_code: Reason code for the Disassociation frame
|
||||
*
|
||||
* Release associated resources and indicate upper layers the
|
||||
* connection is terminated.
|
||||
*/
|
||||
void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid,
|
||||
u16 reason_code)
|
||||
{
|
||||
struct wil6210_priv *wil = vif_to_wil(vif);
|
||||
|
||||
wil_dbg_misc(wil, "got disconnect\n");
|
||||
|
||||
del_timer_sync(&vif->connect_timer);
|
||||
_wil6210_disconnect_complete(vif, bssid, reason_code);
|
||||
}
|
||||
|
||||
void wil_priv_deinit(struct wil6210_priv *wil)
|
||||
|
@ -998,10 +1096,13 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash)
|
|||
|
||||
wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name);
|
||||
|
||||
/* Clear MAC link up */
|
||||
wil_s(wil, RGF_HP_CTRL, BIT(15));
|
||||
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD);
|
||||
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
|
||||
if (wil->hw_version < HW_VER_TALYN) {
|
||||
/* Clear MAC link up */
|
||||
wil_s(wil, RGF_HP_CTRL, BIT(15));
|
||||
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0,
|
||||
BIT_HPAL_PERST_FROM_PAD);
|
||||
wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST);
|
||||
}
|
||||
|
||||
wil_halt_cpu(wil);
|
||||
|
||||
|
@ -1398,8 +1499,15 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
|
|||
wil6210_clear_irq(wil);
|
||||
/* CAF_ICR - clear and mask */
|
||||
/* it is W1C, clear by writing back same value */
|
||||
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
|
||||
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
|
||||
if (wil->hw_version < HW_VER_TALYN_MB) {
|
||||
wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0);
|
||||
wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0);
|
||||
} else {
|
||||
wil_s(wil,
|
||||
RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0);
|
||||
wil_w(wil, RGF_CAF_ICR_TALYN_MB +
|
||||
offsetof(struct RGF_ICR, IMV), ~0);
|
||||
}
|
||||
/* clear PAL_UNIT_ICR (potential D0->D3 leftover)
|
||||
* In Talyn-MB host cannot access this register due to
|
||||
* access control, hence PAL_UNIT_ICR is cleared by the FW
|
||||
|
@ -1511,7 +1619,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
|
|||
if (vif) {
|
||||
cancel_work_sync(&vif->disconnect_worker);
|
||||
wil6210_disconnect(vif, NULL,
|
||||
WLAN_REASON_DEAUTH_LEAVING, false);
|
||||
WLAN_REASON_DEAUTH_LEAVING);
|
||||
}
|
||||
}
|
||||
wil_bcast_fini_all(wil);
|
||||
|
@ -1681,7 +1789,12 @@ int __wil_up(struct wil6210_priv *wil)
|
|||
return rc;
|
||||
|
||||
/* Rx RING. After MAC and beacon */
|
||||
rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order);
|
||||
if (rx_ring_order == 0)
|
||||
rx_ring_order = wil->hw_version < HW_VER_TALYN_MB ?
|
||||
WIL_RX_RING_SIZE_ORDER_DEFAULT :
|
||||
WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT;
|
||||
|
||||
rc = wil->txrx_ops.rx_init(wil, rx_ring_order);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
|
|
@ -345,8 +345,7 @@ wil_vif_alloc(struct wil6210_priv *wil, const char *name,
|
|||
ndev->ieee80211_ptr = wdev;
|
||||
ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
|
||||
NETIF_F_SG | NETIF_F_GRO |
|
||||
NETIF_F_TSO | NETIF_F_TSO6 |
|
||||
NETIF_F_RXHASH;
|
||||
NETIF_F_TSO | NETIF_F_TSO6;
|
||||
|
||||
ndev->features |= ndev->hw_features;
|
||||
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
|
||||
|
@ -513,7 +512,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid)
|
|||
}
|
||||
|
||||
mutex_lock(&wil->mutex);
|
||||
wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
|
||||
wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING);
|
||||
mutex_unlock(&wil->mutex);
|
||||
|
||||
ndev = vif_to_ndev(vif);
|
||||
|
|
|
@ -743,14 +743,6 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
|
|||
|
||||
stats = &wil->sta[cid].stats;
|
||||
|
||||
if (ndev->features & NETIF_F_RXHASH)
|
||||
/* fake L4 to ensure it won't be re-calculated later
|
||||
* set hash to any non-zero value to activate rps
|
||||
* mechanism, core will be chosen according
|
||||
* to user-level rps configuration.
|
||||
*/
|
||||
skb_set_hash(skb, 1, PKT_HASH_TYPE_L4);
|
||||
|
||||
skb_orphan(skb);
|
||||
|
||||
if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
|
||||
|
@ -880,7 +872,7 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil)
|
|||
}
|
||||
}
|
||||
|
||||
static int wil_rx_init(struct wil6210_priv *wil, u16 size)
|
||||
static int wil_rx_init(struct wil6210_priv *wil, uint order)
|
||||
{
|
||||
struct wil_ring *vring = &wil->ring_rx;
|
||||
int rc;
|
||||
|
@ -894,7 +886,7 @@ static int wil_rx_init(struct wil6210_priv *wil, u16 size)
|
|||
|
||||
wil_rx_buf_len_init(wil);
|
||||
|
||||
vring->size = size;
|
||||
vring->size = 1 << order;
|
||||
vring->is_rx = true;
|
||||
rc = wil_vring_alloc(wil, vring);
|
||||
if (rc)
|
||||
|
@ -1403,6 +1395,8 @@ static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil,
|
|||
wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i);
|
||||
wil_set_da_for_vring(wil, skb2, i);
|
||||
wil_tx_ring(wil, vif, v2, skb2);
|
||||
/* successful call to wil_tx_ring takes skb2 ref */
|
||||
dev_kfree_skb_any(skb2);
|
||||
} else {
|
||||
wil_err(wil, "skb_copy failed\n");
|
||||
}
|
||||
|
|
|
@ -160,7 +160,7 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
|
|||
struct wil_ring *ring, u32 i)
|
||||
{
|
||||
struct device *dev = wil_to_dev(wil);
|
||||
unsigned int sz = ALIGN(wil->rx_buf_len, 4);
|
||||
unsigned int sz = wil->rx_buf_len;
|
||||
dma_addr_t pa;
|
||||
u16 buff_id;
|
||||
struct list_head *active = &wil->rx_buff_mgmt.active;
|
||||
|
@ -234,9 +234,10 @@ static int wil_rx_refill_edma(struct wil6210_priv *wil)
|
|||
struct wil_ring *ring = &wil->ring_rx;
|
||||
u32 next_head;
|
||||
int rc = 0;
|
||||
u32 swtail = *ring->edma_rx_swtail.va;
|
||||
ring->swtail = *ring->edma_rx_swtail.va;
|
||||
|
||||
for (; next_head = wil_ring_next_head(ring), (next_head != swtail);
|
||||
for (; next_head = wil_ring_next_head(ring),
|
||||
(next_head != ring->swtail);
|
||||
ring->swhead = next_head) {
|
||||
rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead);
|
||||
if (unlikely(rc)) {
|
||||
|
@ -264,43 +265,26 @@ static void wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil,
|
|||
struct wil_ring *ring)
|
||||
{
|
||||
struct device *dev = wil_to_dev(wil);
|
||||
u32 next_tail;
|
||||
u32 swhead = (ring->swhead + 1) % ring->size;
|
||||
struct list_head *active = &wil->rx_buff_mgmt.active;
|
||||
dma_addr_t pa;
|
||||
u16 dmalen;
|
||||
|
||||
for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead);
|
||||
ring->swtail = next_tail) {
|
||||
struct wil_rx_enhanced_desc dd, *d = ⅆ
|
||||
struct wil_rx_enhanced_desc *_d =
|
||||
(struct wil_rx_enhanced_desc *)
|
||||
&ring->va[ring->swtail].rx.enhanced;
|
||||
struct sk_buff *skb;
|
||||
u16 buff_id;
|
||||
while (!list_empty(active)) {
|
||||
struct wil_rx_buff *rx_buff =
|
||||
list_first_entry(active, struct wil_rx_buff, list);
|
||||
struct sk_buff *skb = rx_buff->skb;
|
||||
|
||||
*d = *_d;
|
||||
|
||||
/* Extract the SKB from the rx_buff management array */
|
||||
buff_id = __le16_to_cpu(d->mac.buff_id);
|
||||
if (buff_id >= wil->rx_buff_mgmt.size) {
|
||||
wil_err(wil, "invalid buff_id %d\n", buff_id);
|
||||
continue;
|
||||
}
|
||||
skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb;
|
||||
wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL;
|
||||
if (unlikely(!skb)) {
|
||||
wil_err(wil, "No Rx skb at buff_id %d\n", buff_id);
|
||||
wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id);
|
||||
} else {
|
||||
pa = wil_rx_desc_get_addr_edma(&d->dma);
|
||||
dmalen = le16_to_cpu(d->dma.length);
|
||||
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
|
||||
|
||||
rx_buff->skb = NULL;
|
||||
memcpy(&pa, skb->cb, sizeof(pa));
|
||||
dma_unmap_single(dev, pa, wil->rx_buf_len,
|
||||
DMA_FROM_DEVICE);
|
||||
kfree_skb(skb);
|
||||
}
|
||||
|
||||
/* Move the buffer from the active to the free list */
|
||||
list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list,
|
||||
&wil->rx_buff_mgmt.free);
|
||||
list_move(&rx_buff->list, &wil->rx_buff_mgmt.free);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -357,8 +341,8 @@ static int wil_init_rx_sring(struct wil6210_priv *wil,
|
|||
struct wil_status_ring *sring = &wil->srings[ring_id];
|
||||
int rc;
|
||||
|
||||
wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size,
|
||||
ring_id);
|
||||
wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n",
|
||||
status_ring_size, ring_id);
|
||||
|
||||
memset(&sring->rx_data, 0, sizeof(sring->rx_data));
|
||||
|
||||
|
@ -602,20 +586,20 @@ static bool wil_is_rx_idle_edma(struct wil6210_priv *wil)
|
|||
|
||||
static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil)
|
||||
{
|
||||
/* RX buffer size must be aligned to 4 bytes */
|
||||
wil->rx_buf_len = rx_large_buf ?
|
||||
WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT;
|
||||
}
|
||||
|
||||
static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
|
||||
static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order)
|
||||
{
|
||||
u16 status_ring_size;
|
||||
u16 status_ring_size, desc_ring_size = 1 << desc_ring_order;
|
||||
struct wil_ring *ring = &wil->ring_rx;
|
||||
int rc;
|
||||
size_t elem_size = wil->use_compressed_rx_status ?
|
||||
sizeof(struct wil_rx_status_compressed) :
|
||||
sizeof(struct wil_rx_status_extended);
|
||||
int i;
|
||||
u16 max_rx_pl_per_desc;
|
||||
|
||||
/* In SW reorder one must use extended status messages */
|
||||
if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) {
|
||||
|
@ -623,7 +607,12 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
|
|||
"compressed RX status cannot be used with SW reorder\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (wil->rx_status_ring_order <= desc_ring_order)
|
||||
/* make sure sring is larger than desc ring */
|
||||
wil->rx_status_ring_order = desc_ring_order + 1;
|
||||
if (wil->rx_buff_id_count <= desc_ring_size)
|
||||
/* make sure we will not run out of buff_ids */
|
||||
wil->rx_buff_id_count = desc_ring_size + 512;
|
||||
if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN ||
|
||||
wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX)
|
||||
wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT;
|
||||
|
@ -636,8 +625,6 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
|
|||
|
||||
wil_rx_buf_len_init_edma(wil);
|
||||
|
||||
max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4);
|
||||
|
||||
/* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */
|
||||
if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1)
|
||||
wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1;
|
||||
|
@ -645,7 +632,7 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size)
|
|||
wil_dbg_misc(wil, "rx_init: allocate %d status rings\n",
|
||||
wil->num_rx_status_rings);
|
||||
|
||||
rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc);
|
||||
rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
|
@ -834,23 +821,24 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil,
|
|||
wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n",
|
||||
l2_rx_status);
|
||||
/* Due to HW issue, KEY error will trigger a MIC error */
|
||||
if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) {
|
||||
wil_dbg_txrx(wil,
|
||||
"L2 MIC/KEY error, dropping packet\n");
|
||||
if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) {
|
||||
wil_err_ratelimited(wil,
|
||||
"L2 MIC/KEY error, dropping packet\n");
|
||||
stats->rx_mic_error++;
|
||||
}
|
||||
if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) {
|
||||
wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n");
|
||||
if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) {
|
||||
wil_err_ratelimited(wil,
|
||||
"L2 KEY error, dropping packet\n");
|
||||
stats->rx_key_error++;
|
||||
}
|
||||
if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) {
|
||||
wil_dbg_txrx(wil,
|
||||
"L2 REPLAY error, dropping packet\n");
|
||||
if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) {
|
||||
wil_err_ratelimited(wil,
|
||||
"L2 REPLAY error, dropping packet\n");
|
||||
stats->rx_replay++;
|
||||
}
|
||||
if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) {
|
||||
wil_dbg_txrx(wil,
|
||||
"L2 AMSDU error, dropping packet\n");
|
||||
if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) {
|
||||
wil_err_ratelimited(wil,
|
||||
"L2 AMSDU error, dropping packet\n");
|
||||
stats->rx_amsdu_error++;
|
||||
}
|
||||
return -EFAULT;
|
||||
|
@ -881,7 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
|
|||
struct sk_buff *skb;
|
||||
dma_addr_t pa;
|
||||
struct wil_ring_rx_data *rxdata = &sring->rx_data;
|
||||
unsigned int sz = ALIGN(wil->rx_buf_len, 4);
|
||||
unsigned int sz = wil->rx_buf_len;
|
||||
struct wil_net_stats *stats = NULL;
|
||||
u16 dmalen;
|
||||
int cid;
|
||||
|
|
|
@ -23,9 +23,9 @@
|
|||
#define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN)
|
||||
#define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX)
|
||||
/* RX sring order should be bigger than RX ring order */
|
||||
#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (11)
|
||||
#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (12)
|
||||
#define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12)
|
||||
#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536)
|
||||
#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (2600)
|
||||
|
||||
#define WIL_DEFAULT_RX_STATUS_RING_ID 0
|
||||
#define WIL_RX_DESC_RING_ID 0
|
||||
|
|
|
@ -81,6 +81,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1)
|
|||
|
||||
#define WIL_TX_Q_LEN_DEFAULT (4000)
|
||||
#define WIL_RX_RING_SIZE_ORDER_DEFAULT (10)
|
||||
#define WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT (11)
|
||||
#define WIL_TX_RING_SIZE_ORDER_DEFAULT (12)
|
||||
#define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7)
|
||||
#define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */
|
||||
|
@ -319,6 +320,7 @@ struct RGF_ICR {
|
|||
/* MAC timer, usec, for packet lifetime */
|
||||
#define RGF_MAC_MTRL_COUNTER_0 (0x886aa8)
|
||||
|
||||
#define RGF_CAF_ICR_TALYN_MB (0x8893d4) /* struct RGF_ICR */
|
||||
#define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */
|
||||
#define RGF_CAF_OSC_CONTROL (0x88afa4)
|
||||
#define BIT_CAF_OSC_XTAL_EN BIT(0)
|
||||
|
@ -613,7 +615,7 @@ struct wil_txrx_ops {
|
|||
int cid, int tid);
|
||||
irqreturn_t (*irq_tx)(int irq, void *cookie);
|
||||
/* RX ops */
|
||||
int (*rx_init)(struct wil6210_priv *wil, u16 ring_size);
|
||||
int (*rx_init)(struct wil6210_priv *wil, uint ring_order);
|
||||
void (*rx_fini)(struct wil6210_priv *wil);
|
||||
int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid,
|
||||
u8 tid, u8 token, u16 status, bool amsdu,
|
||||
|
@ -848,6 +850,14 @@ struct wil6210_vif {
|
|||
u8 hidden_ssid; /* relevant in AP mode */
|
||||
u32 ap_isolate; /* no intra-BSS communication */
|
||||
bool pbss;
|
||||
int bi;
|
||||
u8 *proberesp, *proberesp_ies, *assocresp_ies;
|
||||
size_t proberesp_len, proberesp_ies_len, assocresp_ies_len;
|
||||
u8 ssid[IEEE80211_MAX_SSID_LEN];
|
||||
size_t ssid_len;
|
||||
u8 gtk_index;
|
||||
u8 gtk[WMI_MAX_KEY_LEN];
|
||||
size_t gtk_len;
|
||||
int bcast_ring;
|
||||
struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */
|
||||
int locally_generated_disc; /* relevant in STA mode */
|
||||
|
@ -1220,8 +1230,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring);
|
|||
int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie);
|
||||
int wmi_rxon(struct wil6210_priv *wil, bool on);
|
||||
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
|
||||
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
|
||||
u16 reason, bool full_disconnect, bool del_sta);
|
||||
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
|
||||
bool del_sta);
|
||||
int wmi_addba(struct wil6210_priv *wil, u8 mid,
|
||||
u8 ringid, u8 size, u16 timeout);
|
||||
int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason);
|
||||
|
@ -1276,6 +1286,7 @@ int wmi_stop_discovery(struct wil6210_vif *vif);
|
|||
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
|
||||
struct cfg80211_mgmt_tx_params *params,
|
||||
u64 *cookie);
|
||||
void wil_cfg80211_ap_recovery(struct wil6210_priv *wil);
|
||||
int wil_cfg80211_iface_combinations_from_fw(
|
||||
struct wil6210_priv *wil,
|
||||
const struct wil_fw_record_concurrency *conc);
|
||||
|
@ -1306,7 +1317,9 @@ void wil_abort_scan(struct wil6210_vif *vif, bool sync);
|
|||
void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync);
|
||||
void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps);
|
||||
void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid,
|
||||
u16 reason_code, bool from_event);
|
||||
u16 reason_code);
|
||||
void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid,
|
||||
u16 reason_code);
|
||||
void wil_probe_client_flush(struct wil6210_vif *vif);
|
||||
void wil_probe_client_worker(struct work_struct *work);
|
||||
void wil_disconnect_worker(struct work_struct *work);
|
||||
|
|
|
@ -1018,7 +1018,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len)
|
|||
wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
|
||||
evt->cid, rc);
|
||||
wmi_disconnect_sta(vif, wil->sta[evt->cid].addr,
|
||||
WLAN_REASON_UNSPECIFIED, false, false);
|
||||
WLAN_REASON_UNSPECIFIED, false);
|
||||
} else {
|
||||
wil_info(wil, "successful connection to CID %d\n", evt->cid);
|
||||
}
|
||||
|
@ -1112,7 +1112,24 @@ static void wmi_evt_disconnect(struct wil6210_vif *vif, int id,
|
|||
}
|
||||
|
||||
mutex_lock(&wil->mutex);
|
||||
wil6210_disconnect(vif, evt->bssid, reason_code, true);
|
||||
wil6210_disconnect_complete(vif, evt->bssid, reason_code);
|
||||
if (disable_ap_sme) {
|
||||
struct wireless_dev *wdev = vif_to_wdev(vif);
|
||||
struct net_device *ndev = vif_to_ndev(vif);
|
||||
|
||||
/* disconnect event in disable_ap_sme mode means link loss */
|
||||
switch (wdev->iftype) {
|
||||
/* AP-like interface */
|
||||
case NL80211_IFTYPE_AP:
|
||||
case NL80211_IFTYPE_P2P_GO:
|
||||
/* notify hostapd about link loss */
|
||||
cfg80211_cqm_pktloss_notify(ndev, evt->bssid, 0,
|
||||
GFP_KERNEL);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&wil->mutex);
|
||||
}
|
||||
|
||||
|
@ -1637,7 +1654,7 @@ wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len)
|
|||
return;
|
||||
|
||||
fail:
|
||||
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
|
||||
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1766,7 +1783,7 @@ wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len)
|
|||
return;
|
||||
|
||||
fail:
|
||||
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false);
|
||||
wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1949,16 +1966,17 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
|
|||
{
|
||||
int rc;
|
||||
unsigned long remain;
|
||||
ulong flags;
|
||||
|
||||
mutex_lock(&wil->wmi_mutex);
|
||||
|
||||
spin_lock(&wil->wmi_ev_lock);
|
||||
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
|
||||
wil->reply_id = reply_id;
|
||||
wil->reply_mid = mid;
|
||||
wil->reply_buf = reply;
|
||||
wil->reply_size = reply_size;
|
||||
reinit_completion(&wil->wmi_call);
|
||||
spin_unlock(&wil->wmi_ev_lock);
|
||||
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
|
||||
|
||||
rc = __wmi_send(wil, cmdid, mid, buf, len);
|
||||
if (rc)
|
||||
|
@ -1978,12 +1996,12 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len,
|
|||
}
|
||||
|
||||
out:
|
||||
spin_lock(&wil->wmi_ev_lock);
|
||||
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
|
||||
wil->reply_id = 0;
|
||||
wil->reply_mid = U8_MAX;
|
||||
wil->reply_buf = NULL;
|
||||
wil->reply_size = 0;
|
||||
spin_unlock(&wil->wmi_ev_lock);
|
||||
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
|
||||
|
||||
mutex_unlock(&wil->wmi_mutex);
|
||||
|
||||
|
@ -2560,12 +2578,11 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
|
||||
u16 reason, bool full_disconnect, bool del_sta)
|
||||
int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason,
|
||||
bool del_sta)
|
||||
{
|
||||
struct wil6210_priv *wil = vif_to_wil(vif);
|
||||
int rc;
|
||||
u16 reason_code;
|
||||
struct wmi_disconnect_sta_cmd disc_sta_cmd = {
|
||||
.disconnect_reason = cpu_to_le16(reason),
|
||||
};
|
||||
|
@ -2598,21 +2615,8 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac,
|
|||
wil_fw_error_recovery(wil);
|
||||
return rc;
|
||||
}
|
||||
wil->sinfo_gen++;
|
||||
|
||||
if (full_disconnect) {
|
||||
/* call event handler manually after processing wmi_call,
|
||||
* to avoid deadlock - disconnect event handler acquires
|
||||
* wil->mutex while it is already held here
|
||||
*/
|
||||
reason_code = le16_to_cpu(reply.evt.protocol_reason_status);
|
||||
|
||||
wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n",
|
||||
reply.evt.bssid, reason_code,
|
||||
reply.evt.disconnect_reason);
|
||||
|
||||
wil->sinfo_gen++;
|
||||
wil6210_disconnect(vif, reply.evt.bssid, reason_code, true);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -3145,7 +3149,7 @@ static void wmi_event_handle(struct wil6210_priv *wil,
|
|||
|
||||
if (mid == MID_BROADCAST)
|
||||
mid = 0;
|
||||
if (mid >= wil->max_vifs) {
|
||||
if (mid >= ARRAY_SIZE(wil->vifs) || mid >= wil->max_vifs) {
|
||||
wil_dbg_wmi(wil, "invalid mid %d, event skipped\n",
|
||||
mid);
|
||||
return;
|
||||
|
|
|
@ -4,6 +4,7 @@ config B43
|
|||
select BCMA if B43_BCMA
|
||||
select SSB if B43_SSB
|
||||
select FW_LOADER
|
||||
select CORDIC
|
||||
---help---
|
||||
b43 is a driver for the Broadcom 43xx series wireless devices.
|
||||
|
||||
|
|
|
@ -604,50 +604,3 @@ void b43_phy_force_clock(struct b43_wldev *dev, bool force)
|
|||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
|
||||
struct b43_c32 b43_cordic(int theta)
|
||||
{
|
||||
static const u32 arctg[] = {
|
||||
2949120, 1740967, 919879, 466945, 234379, 117304,
|
||||
58666, 29335, 14668, 7334, 3667, 1833,
|
||||
917, 458, 229, 115, 57, 29,
|
||||
};
|
||||
u8 i;
|
||||
s32 tmp;
|
||||
s8 signx = 1;
|
||||
u32 angle = 0;
|
||||
struct b43_c32 ret = { .i = 39797, .q = 0, };
|
||||
|
||||
while (theta > (180 << 16))
|
||||
theta -= (360 << 16);
|
||||
while (theta < -(180 << 16))
|
||||
theta += (360 << 16);
|
||||
|
||||
if (theta > (90 << 16)) {
|
||||
theta -= (180 << 16);
|
||||
signx = -1;
|
||||
} else if (theta < -(90 << 16)) {
|
||||
theta += (180 << 16);
|
||||
signx = -1;
|
||||
}
|
||||
|
||||
for (i = 0; i <= 17; i++) {
|
||||
if (theta > angle) {
|
||||
tmp = ret.i - (ret.q >> i);
|
||||
ret.q += ret.i >> i;
|
||||
ret.i = tmp;
|
||||
angle += arctg[i];
|
||||
} else {
|
||||
tmp = ret.i + (ret.q >> i);
|
||||
ret.q -= ret.i >> i;
|
||||
ret.i = tmp;
|
||||
angle -= arctg[i];
|
||||
}
|
||||
}
|
||||
|
||||
ret.i *= signx;
|
||||
ret.q *= signx;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -7,13 +7,6 @@
|
|||
|
||||
struct b43_wldev;
|
||||
|
||||
/* Complex number using 2 32-bit signed integers */
|
||||
struct b43_c32 { s32 i, q; };
|
||||
|
||||
#define CORDIC_CONVERT(value) (((value) >= 0) ? \
|
||||
((((value) >> 15) + 1) >> 1) : \
|
||||
-((((-(value)) >> 15) + 1) >> 1))
|
||||
|
||||
/* PHY register routing bits */
|
||||
#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
|
||||
#define B43_PHYROUTE_BASE 0x0000 /* Base registers */
|
||||
|
@ -450,6 +443,4 @@ bool b43_is_40mhz(struct b43_wldev *dev);
|
|||
|
||||
void b43_phy_force_clock(struct b43_wldev *dev, bool force);
|
||||
|
||||
struct b43_c32 b43_cordic(int theta);
|
||||
|
||||
#endif /* LINUX_B43_PHY_COMMON_H_ */
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
|
||||
*/
|
||||
|
||||
#include <linux/cordic.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "b43.h"
|
||||
|
@ -1780,9 +1781,9 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
|
|||
{
|
||||
struct b43_phy_lp *lpphy = dev->phy.lp;
|
||||
u16 buf[64];
|
||||
int i, samples = 0, angle = 0;
|
||||
int i, samples = 0, theta = 0;
|
||||
int rotation = (((36 * freq) / 20) << 16) / 100;
|
||||
struct b43_c32 sample;
|
||||
struct cordic_iq sample;
|
||||
|
||||
lpphy->tx_tone_freq = freq;
|
||||
|
||||
|
@ -1798,10 +1799,10 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
|
|||
}
|
||||
|
||||
for (i = 0; i < samples; i++) {
|
||||
sample = b43_cordic(angle);
|
||||
angle += rotation;
|
||||
buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8;
|
||||
buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF);
|
||||
sample = cordic_calc_iq(CORDIC_FIXED(theta));
|
||||
theta += rotation;
|
||||
buf[i] = CORDIC_FLOAT((sample.i * max) & 0xFF) << 8;
|
||||
buf[i] |= CORDIC_FLOAT((sample.q * max) & 0xFF);
|
||||
}
|
||||
|
||||
b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
|
||||
*/
|
||||
|
||||
#include <linux/cordic.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
|
@ -1513,7 +1514,7 @@ static void b43_radio_init2055(struct b43_wldev *dev)
|
|||
|
||||
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
|
||||
static int b43_nphy_load_samples(struct b43_wldev *dev,
|
||||
struct b43_c32 *samples, u16 len) {
|
||||
struct cordic_iq *samples, u16 len) {
|
||||
struct b43_phy_n *nphy = dev->phy.n;
|
||||
u16 i;
|
||||
u32 *data;
|
||||
|
@ -1544,7 +1545,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
|
|||
{
|
||||
int i;
|
||||
u16 bw, len, rot, angle;
|
||||
struct b43_c32 *samples;
|
||||
struct cordic_iq *samples;
|
||||
|
||||
bw = b43_is_40mhz(dev) ? 40 : 20;
|
||||
len = bw << 3;
|
||||
|
@ -1561,7 +1562,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
|
|||
len = bw << 1;
|
||||
}
|
||||
|
||||
samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL);
|
||||
samples = kcalloc(len, sizeof(struct cordic_iq), GFP_KERNEL);
|
||||
if (!samples) {
|
||||
b43err(dev->wl, "allocation for samples generation failed\n");
|
||||
return 0;
|
||||
|
@ -1570,10 +1571,10 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
|
|||
angle = 0;
|
||||
|
||||
for (i = 0; i < len; i++) {
|
||||
samples[i] = b43_cordic(angle);
|
||||
samples[i] = cordic_calc_iq(CORDIC_FIXED(angle));
|
||||
angle += rot;
|
||||
samples[i].q = CORDIC_CONVERT(samples[i].q * max);
|
||||
samples[i].i = CORDIC_CONVERT(samples[i].i * max);
|
||||
samples[i].q = CORDIC_FLOAT(samples[i].q * max);
|
||||
samples[i].i = CORDIC_FLOAT(samples[i].i * max);
|
||||
}
|
||||
|
||||
i = b43_nphy_load_samples(dev, samples, len);
|
||||
|
|
|
@ -54,3 +54,5 @@ brcmfmac-$(CONFIG_BRCM_TRACING) += \
|
|||
tracepoint.o
|
||||
brcmfmac-$(CONFIG_OF) += \
|
||||
of.o
|
||||
brcmfmac-$(CONFIG_DMI) += \
|
||||
dmi.o
|
||||
|
|
|
@ -214,7 +214,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
|
|||
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
|
||||
sizeof(ifp->mac_addr));
|
||||
if (err < 0) {
|
||||
brcmf_err("Retreiving cur_etheraddr failed, %d\n", err);
|
||||
brcmf_err("Retrieving cur_etheraddr failed, %d\n", err);
|
||||
goto done;
|
||||
}
|
||||
memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN);
|
||||
|
@ -269,7 +269,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
|
|||
strcpy(buf, "ver");
|
||||
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
|
||||
if (err < 0) {
|
||||
brcmf_err("Retreiving version information failed, %d\n",
|
||||
brcmf_err("Retrieving version information failed, %d\n",
|
||||
err);
|
||||
goto done;
|
||||
}
|
||||
|
@ -448,7 +448,8 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev,
|
|||
}
|
||||
}
|
||||
if (!found) {
|
||||
/* No platform data for this device, try OF (Open Firwmare) */
|
||||
/* No platform data for this device, try OF and DMI data */
|
||||
brcmf_dmi_probe(settings, chip, chiprev);
|
||||
brcmf_of_probe(dev, bus_type, settings);
|
||||
}
|
||||
return settings;
|
||||
|
|
|
@ -59,6 +59,7 @@ struct brcmf_mp_device {
|
|||
bool iapp;
|
||||
bool ignore_probe_fail;
|
||||
struct brcmfmac_pd_cc *country_codes;
|
||||
const char *board_type;
|
||||
union {
|
||||
struct brcmfmac_sdio_pd sdio;
|
||||
} bus;
|
||||
|
@ -74,4 +75,11 @@ void brcmf_release_module_param(struct brcmf_mp_device *module_param);
|
|||
/* Sets dongle media info (drv_version, mac address). */
|
||||
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp);
|
||||
|
||||
#ifdef CONFIG_DMI
|
||||
void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev);
|
||||
#else
|
||||
static inline void
|
||||
brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {}
|
||||
#endif
|
||||
|
||||
#endif /* BRCMFMAC_COMMON_H */
|
||||
|
|
|
@ -0,0 +1,116 @@
|
|||
/*
|
||||
* Copyright 2018 Hans de Goede <hdegoede@redhat.com>
|
||||
*
|
||||
* Permission to use, copy, modify, and/or distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
|
||||
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
|
||||
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
|
||||
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include "core.h"
|
||||
#include "common.h"
|
||||
#include "brcm_hw_ids.h"
|
||||
|
||||
/* The DMI data never changes so we can use a static buf for this */
|
||||
static char dmi_board_type[128];
|
||||
|
||||
struct brcmf_dmi_data {
|
||||
u32 chip;
|
||||
u32 chiprev;
|
||||
const char *board_type;
|
||||
};
|
||||
|
||||
/* NOTE: Please keep all entries sorted alphabetically */
|
||||
|
||||
static const struct brcmf_dmi_data gpd_win_pocket_data = {
|
||||
BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket"
|
||||
};
|
||||
|
||||
static const struct brcmf_dmi_data jumper_ezpad_mini3_data = {
|
||||
BRCM_CC_43430_CHIP_ID, 0, "jumper-ezpad-mini3"
|
||||
};
|
||||
|
||||
static const struct brcmf_dmi_data meegopad_t08_data = {
|
||||
BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08"
|
||||
};
|
||||
|
||||
static const struct dmi_system_id dmi_platform_data[] = {
|
||||
{
|
||||
/* Match for the GPDwin which unfortunately uses somewhat
|
||||
* generic dmi strings, which is why we test for 4 strings.
|
||||
* Comparing against 23 other byt/cht boards, board_vendor
|
||||
* and board_name are unique to the GPDwin, where as only one
|
||||
* other board has the same board_serial and 3 others have
|
||||
* the same default product_name. Also the GPDwin is the
|
||||
* only device to have both board_ and product_name not set.
|
||||
*/
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "Default string"),
|
||||
DMI_MATCH(DMI_BOARD_SERIAL, "Default string"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
|
||||
},
|
||||
.driver_data = (void *)&gpd_win_pocket_data,
|
||||
},
|
||||
{
|
||||
/* Jumper EZpad mini3 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
|
||||
/* jumperx.T87.KFBNEEA02 with the version-nr dropped */
|
||||
DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"),
|
||||
},
|
||||
.driver_data = (void *)&jumper_ezpad_mini3_data,
|
||||
},
|
||||
{
|
||||
/* Meegopad T08 */
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Default string"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
|
||||
DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"),
|
||||
DMI_MATCH(DMI_BOARD_VERSION, "V1.1"),
|
||||
},
|
||||
.driver_data = (void *)&meegopad_t08_data,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev)
|
||||
{
|
||||
const struct dmi_system_id *match;
|
||||
const struct brcmf_dmi_data *data;
|
||||
const char *sys_vendor;
|
||||
const char *product_name;
|
||||
|
||||
/* Some models have DMI strings which are too generic, e.g.
|
||||
* "Default string", we use a quirk table for these.
|
||||
*/
|
||||
for (match = dmi_first_match(dmi_platform_data);
|
||||
match;
|
||||
match = dmi_first_match(match + 1)) {
|
||||
data = match->driver_data;
|
||||
|
||||
if (data->chip == chip && data->chiprev == chiprev) {
|
||||
settings->board_type = data->board_type;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Not found in the quirk-table, use sys_vendor-product_name */
|
||||
sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR);
|
||||
product_name = dmi_get_system_info(DMI_PRODUCT_NAME);
|
||||
if (sys_vendor && product_name) {
|
||||
snprintf(dmi_board_type, sizeof(dmi_board_type), "%s-%s",
|
||||
sys_vendor, product_name);
|
||||
settings->board_type = dmi_board_type;
|
||||
}
|
||||
}
|
|
@ -14,6 +14,7 @@
|
|||
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/efi.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/device.h>
|
||||
|
@ -445,6 +446,75 @@ struct brcmf_fw {
|
|||
|
||||
static void brcmf_fw_request_done(const struct firmware *fw, void *ctx);
|
||||
|
||||
#ifdef CONFIG_EFI
|
||||
/* In some cases the EFI-var stored nvram contains "ccode=ALL" or "ccode=XV"
|
||||
* to specify "worldwide" compatible settings, but these 2 ccode-s do not work
|
||||
* properly. "ccode=ALL" causes channels 12 and 13 to not be available,
|
||||
* "ccode=XV" causes all 5GHz channels to not be available. So we replace both
|
||||
* with "ccode=X2" which allows channels 12+13 and 5Ghz channels in
|
||||
* no-Initiate-Radiation mode. This means that we will never send on these
|
||||
* channels without first having received valid wifi traffic on the channel.
|
||||
*/
|
||||
static void brcmf_fw_fix_efi_nvram_ccode(char *data, unsigned long data_len)
|
||||
{
|
||||
char *ccode;
|
||||
|
||||
ccode = strnstr((char *)data, "ccode=ALL", data_len);
|
||||
if (!ccode)
|
||||
ccode = strnstr((char *)data, "ccode=XV\r", data_len);
|
||||
if (!ccode)
|
||||
return;
|
||||
|
||||
ccode[6] = 'X';
|
||||
ccode[7] = '2';
|
||||
ccode[8] = '\r';
|
||||
}
|
||||
|
||||
static u8 *brcmf_fw_nvram_from_efi(size_t *data_len_ret)
|
||||
{
|
||||
const u16 name[] = { 'n', 'v', 'r', 'a', 'm', 0 };
|
||||
struct efivar_entry *nvram_efivar;
|
||||
unsigned long data_len = 0;
|
||||
u8 *data = NULL;
|
||||
int err;
|
||||
|
||||
nvram_efivar = kzalloc(sizeof(*nvram_efivar), GFP_KERNEL);
|
||||
if (!nvram_efivar)
|
||||
return NULL;
|
||||
|
||||
memcpy(&nvram_efivar->var.VariableName, name, sizeof(name));
|
||||
nvram_efivar->var.VendorGuid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61,
|
||||
0xb5, 0x1f, 0x43, 0x26,
|
||||
0x81, 0x23, 0xd1, 0x13);
|
||||
|
||||
err = efivar_entry_size(nvram_efivar, &data_len);
|
||||
if (err)
|
||||
goto fail;
|
||||
|
||||
data = kmalloc(data_len, GFP_KERNEL);
|
||||
if (!data)
|
||||
goto fail;
|
||||
|
||||
err = efivar_entry_get(nvram_efivar, NULL, &data_len, data);
|
||||
if (err)
|
||||
goto fail;
|
||||
|
||||
brcmf_fw_fix_efi_nvram_ccode(data, data_len);
|
||||
brcmf_info("Using nvram EFI variable\n");
|
||||
|
||||
kfree(nvram_efivar);
|
||||
*data_len_ret = data_len;
|
||||
return data;
|
||||
|
||||
fail:
|
||||
kfree(data);
|
||||
kfree(nvram_efivar);
|
||||
return NULL;
|
||||
}
|
||||
#else
|
||||
static u8 *brcmf_fw_nvram_from_efi(size_t *data_len) { return NULL; }
|
||||
#endif
|
||||
|
||||
static void brcmf_fw_free_request(struct brcmf_fw_request *req)
|
||||
{
|
||||
struct brcmf_fw_item *item;
|
||||
|
@ -463,11 +533,12 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
|
|||
{
|
||||
struct brcmf_fw *fwctx = ctx;
|
||||
struct brcmf_fw_item *cur;
|
||||
bool free_bcm47xx_nvram = false;
|
||||
bool kfree_nvram = false;
|
||||
u32 nvram_length = 0;
|
||||
void *nvram = NULL;
|
||||
u8 *data = NULL;
|
||||
size_t data_len;
|
||||
bool raw_nvram;
|
||||
|
||||
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(fwctx->dev));
|
||||
|
||||
|
@ -476,12 +547,13 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
|
|||
if (fw && fw->data) {
|
||||
data = (u8 *)fw->data;
|
||||
data_len = fw->size;
|
||||
raw_nvram = false;
|
||||
} else {
|
||||
data = bcm47xx_nvram_get_contents(&data_len);
|
||||
if (!data && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
|
||||
if ((data = bcm47xx_nvram_get_contents(&data_len)))
|
||||
free_bcm47xx_nvram = true;
|
||||
else if ((data = brcmf_fw_nvram_from_efi(&data_len)))
|
||||
kfree_nvram = true;
|
||||
else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL))
|
||||
goto fail;
|
||||
raw_nvram = true;
|
||||
}
|
||||
|
||||
if (data)
|
||||
|
@ -489,8 +561,11 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
|
|||
fwctx->req->domain_nr,
|
||||
fwctx->req->bus_nr);
|
||||
|
||||
if (raw_nvram)
|
||||
if (free_bcm47xx_nvram)
|
||||
bcm47xx_nvram_release_contents(data);
|
||||
if (kfree_nvram)
|
||||
kfree(data);
|
||||
|
||||
release_firmware(fw);
|
||||
if (!nvram && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
|
||||
goto fail;
|
||||
|
@ -504,90 +579,75 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx)
    return -ENOENT;
}

static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async)
{
    struct brcmf_fw_item *cur;
    const struct firmware *fw = NULL;
    int ret;

    cur = &fwctx->req->items[fwctx->curpos];

    brcmf_dbg(TRACE, "%srequest for %s\n", async ? "async " : "",
              cur->path);

    if (async)
        ret = request_firmware_nowait(THIS_MODULE, true, cur->path,
                                      fwctx->dev, GFP_KERNEL, fwctx,
                                      brcmf_fw_request_done);
    else
        ret = request_firmware(&fw, cur->path, fwctx->dev);

    if (ret < 0) {
        brcmf_fw_request_done(NULL, fwctx);
    } else if (!async && fw) {
        brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path,
                  fw ? "" : "not ");
        if (cur->type == BRCMF_FW_TYPE_BINARY)
            cur->binary = fw;
        else if (cur->type == BRCMF_FW_TYPE_NVRAM)
            brcmf_fw_request_nvram_done(fw, fwctx);
        else
            release_firmware(fw);

        return -EAGAIN;
    }
    return 0;
}

static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
{
    struct brcmf_fw *fwctx = ctx;
    struct brcmf_fw_item *cur;
    int ret = 0;

    cur = &fwctx->req->items[fwctx->curpos];

    brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path,
              fw ? "" : "not ");

    if (!fw)
        ret = -ENOENT;

    switch (cur->type) {
    case BRCMF_FW_TYPE_NVRAM:
        ret = brcmf_fw_request_nvram_done(fw, fwctx);
        break;
    case BRCMF_FW_TYPE_BINARY:
        cur->binary = fw;
        break;
    default:
        /* something fishy here so bail out early */
        brcmf_err("unknown fw type: %d\n", cur->type);
        release_firmware(fw);
        ret = -EINVAL;
        goto fail;
    }

    if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL))
        goto fail;

    do {
        if (++fwctx->curpos == fwctx->req->n_items) {
            ret = 0;
            goto done;
        }

        ret = brcmf_fw_request_next_item(fwctx, false);
    } while (ret == -EAGAIN);

    return;

fail:
    brcmf_dbg(TRACE, "failed err=%d: dev=%s, fw=%s\n", ret,
              dev_name(fwctx->dev), cur->path);
    brcmf_fw_free_request(fwctx->req);
    fwctx->req = NULL;
done:
    fwctx->done(fwctx->dev, ret, fwctx->req);
    kfree(fwctx);
}

static int brcmf_fw_complete_request(const struct firmware *fw,
                                     struct brcmf_fw *fwctx)
{
    struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
    int ret = 0;

    brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path, fw ? "" : "not ");

    switch (cur->type) {
    case BRCMF_FW_TYPE_NVRAM:
        ret = brcmf_fw_request_nvram_done(fw, fwctx);
        break;
    case BRCMF_FW_TYPE_BINARY:
        if (fw)
            cur->binary = fw;
        else
            ret = -ENOENT;
        break;
    default:
        /* something fishy here so bail out early */
        brcmf_err("unknown fw type: %d\n", cur->type);
        release_firmware(fw);
        ret = -EINVAL;
    }

    return (cur->flags & BRCMF_FW_REQF_OPTIONAL) ? 0 : ret;
}

static int brcmf_fw_request_firmware(const struct firmware **fw,
                                     struct brcmf_fw *fwctx)
{
    struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos];
    int ret;

    /* nvram files are board-specific, first try a board-specific path */
    if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) {
        char alt_path[BRCMF_FW_NAME_LEN];

        strlcpy(alt_path, cur->path, BRCMF_FW_NAME_LEN);
        /* strip .txt at the end */
        alt_path[strlen(alt_path) - 4] = 0;
        strlcat(alt_path, ".", BRCMF_FW_NAME_LEN);
        strlcat(alt_path, fwctx->req->board_type, BRCMF_FW_NAME_LEN);
        strlcat(alt_path, ".txt", BRCMF_FW_NAME_LEN);

        ret = request_firmware(fw, alt_path, fwctx->dev);
        if (ret == 0)
            return ret;
    }

    return request_firmware(fw, cur->path, fwctx->dev);
}

static void brcmf_fw_request_done(const struct firmware *fw, void *ctx)
{
    struct brcmf_fw *fwctx = ctx;
    int ret;

    ret = brcmf_fw_complete_request(fw, fwctx);

    while (ret == 0 && ++fwctx->curpos < fwctx->req->n_items) {
        brcmf_fw_request_firmware(&fw, fwctx);
        ret = brcmf_fw_complete_request(fw, ctx);
    }

    if (ret) {
        brcmf_fw_free_request(fwctx->req);
        fwctx->req = NULL;
    }
    fwctx->done(fwctx->dev, ret, fwctx->req);
    kfree(fwctx);
}

@@ -611,7 +671,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
                           void (*fw_cb)(struct device *dev, int err,
                                         struct brcmf_fw_request *req))
{
    struct brcmf_fw_item *first = &req->items[0];
    struct brcmf_fw *fwctx;
    int ret;

    brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
    if (!fw_cb)

@@ -628,7 +690,12 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
    fwctx->req = req;
    fwctx->done = fw_cb;

    brcmf_fw_request_next_item(fwctx, true);
    ret = request_firmware_nowait(THIS_MODULE, true, first->path,
                                  fwctx->dev, GFP_KERNEL, fwctx,
                                  brcmf_fw_request_done);
    if (ret < 0)
        brcmf_fw_request_done(NULL, fwctx);

    return 0;
}

@@ -641,8 +708,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
    struct brcmf_fw_request *fwreq;
    char chipname[12];
    const char *mp_path;
    size_t mp_path_len;
    u32 i, j;
    char end;
    char end = '\0';
    size_t reqsz;

    for (i = 0; i < table_size; i++) {

@@ -667,7 +735,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
             mapping_table[i].fw_base, chipname);

    mp_path = brcmf_mp_global.firmware_path;
    end = mp_path[strlen(mp_path) - 1];
    mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
    if (mp_path_len)
        end = mp_path[mp_path_len - 1];

    fwreq->n_items = n_fwnames;

    for (j = 0; j < n_fwnames; j++) {

@@ -70,6 +70,7 @@ struct brcmf_fw_request {
    u16 domain_nr;
    u16 bus_nr;
    u32 n_items;
    const char *board_type;
    struct brcmf_fw_item items[0];
};

@@ -176,6 +176,8 @@

#define BRCMF_VHT_CAP_MCS_MAP_NSS_MAX 8

#define BRCMF_HE_CAP_MCS_MAP_NSS_MAX 8

/* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each
 * ioctl. It is relatively small because firmware has small maximum size input
 * playload restriction for ioctls.
@@ -601,13 +603,37 @@ struct brcmf_sta_info_le {
    __le32 rx_pkts_retried;        /* # rx with retry bit set */
    __le32 tx_rate_fallback;       /* lowest fallback TX rate */

    /* Fields valid for ver >= 5 */
    struct {
        __le32 count;          /* # rates in this set */
        u8 rates[BRCMF_MAXRATES_IN_SET];       /* rates in 500kbps units w/hi bit set if basic */
        u8 mcs[BRCMF_MCSSET_LEN];              /* supported mcs index bit map */
        __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
    } rateset_adv;
    union {
        struct {
            struct {
                __le32 count;          /* # rates in this set */
                u8 rates[BRCMF_MAXRATES_IN_SET];       /* rates in 500kbps units w/hi bit set if basic */
                u8 mcs[BRCMF_MCSSET_LEN];              /* supported mcs index bit map */
                __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
            } rateset_adv;
        } v5;

        struct {
            __le32 rx_dur_total;   /* total user RX duration (estimated) */
            __le16 chanspec;       /** chanspec this sta is on */
            __le16 pad_1;
            struct {
                __le16 version;        /* version */
                __le16 len;            /* length */
                __le32 count;          /* # rates in this set */
                u8 rates[BRCMF_MAXRATES_IN_SET];       /* rates in 500kbps units w/hi bit set if basic */
                u8 mcs[BRCMF_MCSSET_LEN];              /* supported mcs index bit map */
                __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */
                __le16 he_mcs[BRCMF_HE_CAP_MCS_MAP_NSS_MAX]; /* supported he mcs index bit map per nss */
            } rateset_adv;         /* rateset along with mcs index bitmap */
            __le16 wpauth;         /* authentication type */
            u8 algo;               /* crypto algorithm */
            u8 pad_2;
            __le32 tx_rspec;       /* Rate of last successful tx frame */
            __le32 rx_rspec;       /* Rate of last successful rx frame */
            __le32 wnm_cap;        /* wnm capabilities */
        } v7;
    };
};

struct brcmf_chanspec_list {

@@ -27,11 +27,20 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
                    struct brcmf_mp_device *settings)
{
    struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
    struct device_node *np = dev->of_node;
    struct device_node *root, *np = dev->of_node;
    struct property *prop;
    int irq;
    u32 irqf;
    u32 val;

    /* Set board-type to the first string of the machine compatible prop */
    root = of_find_node_by_path("/");
    if (root) {
        prop = of_find_property(root, "compatible", NULL);
        settings->board_type = of_prop_next_string(prop, NULL);
        of_node_put(root);
    }

    if (!np || bus_type != BRCMF_BUSTYPE_SDIO ||
        !of_device_is_compatible(np, "brcm,bcm4329-fmac"))
        return;

@@ -1785,6 +1785,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo)
    fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
    fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
    fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL;
    fwreq->board_type = devinfo->settings->board_type;
    /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */
    fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1;
    fwreq->bus_nr = devinfo->pdev->bus->number;

@@ -4174,6 +4174,7 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus)

    fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY;
    fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM;
    fwreq->board_type = bus->sdiodev->settings->board_type;

    return fwreq;
}

@@ -846,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw,
        status = brcms_c_aggregatable(wl->wlc, tid);
        spin_unlock_bh(&wl->lock);
        if (!status) {
            brcms_err(wl->wlc->hw->d11core,
                      "START: tid %d is not agg\'able\n", tid);
            brcms_dbg_ht(wl->wlc->hw->d11core,
                         "START: tid %d is not agg\'able\n", tid);
            return -EINVAL;
        }
        ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);

@@ -220,13 +220,6 @@ enum phy_cal_mode {
#define BB_MULT_MASK 0x0000ffff
#define BB_MULT_VALID_MASK 0x80000000

#define CORDIC_AG 39797
#define CORDIC_NI 18
#define FIXED(X) ((s32)((X) << 16))

#define FLOAT(X) \
    (((X) >= 0) ? ((((X) >> 15) + 1) >> 1) : -((((-(X)) >> 15) + 1) >> 1))

#define PHY_CHAIN_TX_DISABLE_TEMP 115
#define PHY_HYSTERESIS_DELTATEMP 5

@@ -3447,8 +3447,8 @@ wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,

        theta += rot;

        i_samp = (u16) (FLOAT(tone_samp.i * max_val) & 0x3ff);
        q_samp = (u16) (FLOAT(tone_samp.q * max_val) & 0x3ff);
        i_samp = (u16)(CORDIC_FLOAT(tone_samp.i * max_val) & 0x3ff);
        q_samp = (u16)(CORDIC_FLOAT(tone_samp.q * max_val) & 0x3ff);
        data_buf[t] = (i_samp << 10) | q_samp;
    }

@@ -23089,8 +23089,8 @@ wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val,

        theta += rot;

        tone_buf[t].q = (s32) FLOAT(tone_buf[t].q * max_val);
        tone_buf[t].i = (s32) FLOAT(tone_buf[t].i * max_val);
        tone_buf[t].q = (s32)CORDIC_FLOAT(tone_buf[t].q * max_val);
        tone_buf[t].i = (s32)CORDIC_FLOAT(tone_buf[t].i * max_val);
    }

    wlc_phy_loadsampletable_nphy(pi, tone_buf, num_samps);

@@ -128,7 +128,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
        }
        break;
    default:
        WARN_ON_ONCE(1);
        WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
        break;
    }

@@ -140,7 +140,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch)
        ch->band = BRCMU_CHAN_BAND_2G;
        break;
    default:
        WARN_ON_ONCE(1);
        WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
        break;
    }
}

@@ -167,7 +167,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
            ch->sb = BRCMU_CHAN_SB_U;
            ch->control_ch_num += CH_10MHZ_APART;
        } else {
            WARN_ON_ONCE(1);
            WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
        }
        break;
    case BRCMU_CHSPEC_D11AC_BW_80:

@@ -188,7 +188,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
            ch->control_ch_num += CH_30MHZ_APART;
            break;
        default:
            WARN_ON_ONCE(1);
            WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
            break;
        }
        break;

@@ -222,13 +222,13 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
            ch->control_ch_num += CH_70MHZ_APART;
            break;
        default:
            WARN_ON_ONCE(1);
            WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
            break;
        }
        break;
    case BRCMU_CHSPEC_D11AC_BW_8080:
    default:
        WARN_ON_ONCE(1);
        WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
        break;
    }

@@ -240,7 +240,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
        ch->band = BRCMU_CHAN_BAND_2G;
        break;
    default:
        WARN_ON_ONCE(1);
        WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec);
        break;
    }
}

@@ -5462,7 +5462,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
       we have to add a spin lock... */
    rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
    while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
        ptr += sprintf(ptr, "%pM %*s rssi = %d",
        ptr += sprintf(ptr, "%pM %.*s rssi = %d",
                       BSSList_rid.bssid,
                       (int)BSSList_rid.ssidLen,
                       BSSList_rid.ssid,

@@ -781,7 +781,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,

    switch (scale_action) {
    case -1:
        /* Decrese rate */
        /* Decrease rate */
        if (low != RATE_INVALID)
            idx = low;
        break;

@@ -559,7 +559,7 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
            decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
            break;
        }
        /* fall through if TTAK OK */
        /* fall through - if TTAK OK */
    default:
        if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
            decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;

@@ -2695,6 +2695,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
        if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
            RX_RES_STATUS_BAD_KEY_TTAK)
            break;
        /* fall through */

    case RX_RES_STATUS_SEC_TYPE_WEP:
        if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==

@@ -2704,6 +2705,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
            D_RX("Packet destroyed\n");
            return -1;
        }
        /* fall through */
    case RX_RES_STATUS_SEC_TYPE_CCMP:
        if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
            RX_RES_STATUS_DECRYPT_OK) {

@@ -11,6 +11,7 @@ iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o

@@ -323,7 +323,6 @@ MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));

@ -0,0 +1,401 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called COPYING.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <linuxwifi@intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
* BSD LICENSE
|
||||
*
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in
|
||||
* the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name Intel Corporation nor the names of its
|
||||
* contributors may be used to endorse or promote products derived
|
||||
* from this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*****************************************************************************/
|
||||
#ifndef __iwl_fw_dbg_tlv_h__
|
||||
#define __iwl_fw_dbg_tlv_h__
|
||||
|
||||
#include <linux/bitops.h>
|
||||
|
||||
/*
|
||||
* struct iwl_fw_ini_header: Common Header for all debug group TLV's structures
|
||||
* @tlv_version: version info
|
||||
* @apply_point: &enum iwl_fw_ini_apply_point
|
||||
* @data: TLV data followed
|
||||
**/
|
||||
struct iwl_fw_ini_header {
|
||||
__le32 tlv_version;
|
||||
__le32 apply_point;
|
||||
u8 data[];
|
||||
} __packed; /* FW_INI_HEADER_TLV_S */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION)
|
||||
* buffer allocation TLV - for debug
|
||||
*
|
||||
* @iwl_fw_ini_header: header
|
||||
* @allocation_id: &enum iwl_fw_ini_allocation_id - to bind allocation and hcmd
|
||||
* if needed (DBGC1/DBGC2/SDFX/...)
|
||||
* @buffer_location: type of iwl_fw_ini_buffer_location
|
||||
* @size: size in bytes
|
||||
* @max_fragments: the maximum allowed fragmentation in the desired memory
|
||||
* allocation above
|
||||
* @min_frag_size: the minimum allowed fragmentation size in bytes
|
||||
*/
|
||||
struct iwl_fw_ini_allocation_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
__le32 allocation_id;
|
||||
__le32 buffer_location;
|
||||
__le32 size;
|
||||
__le32 max_fragments;
|
||||
__le32 min_frag_size;
|
||||
} __packed; /* FW_INI_BUFFER_ALLOCATION_TLV_S_VER_1 */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_hcmd (IWL_FW_INI_TLV_TYPE_HCMD)
|
||||
* Generic Host command pass through TLV
|
||||
*
|
||||
* @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC
|
||||
* @group: the desired cmd group
|
||||
* @padding: all zeros for dword alignment
|
||||
* @data: all of the relevant command (0xf6/0xf5) to be sent
|
||||
*/
|
||||
struct iwl_fw_ini_hcmd {
|
||||
u8 id;
|
||||
u8 group;
|
||||
__le16 padding;
|
||||
u8 data[0];
|
||||
} __packed; /* FW_INI_HCMD_S */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_hcmd_tlv
|
||||
* @header: header
|
||||
* @hcmd: a variable length host-command to be sent to apply the configuration.
|
||||
*/
|
||||
struct iwl_fw_ini_hcmd_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
struct iwl_fw_ini_hcmd hcmd;
|
||||
} __packed; /* FW_INI_HCMD_TLV_S_VER_1 */
|
||||
|
||||
/*
|
||||
* struct iwl_fw_ini_debug_flow_tlv (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW)
|
||||
*
|
||||
* @header: header
|
||||
* @debug_flow_cfg: &enum iwl_fw_ini_debug_flow
|
||||
*/
|
||||
struct iwl_fw_ini_debug_flow_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
__le32 debug_flow_cfg;
|
||||
} __packed; /* FW_INI_DEBUG_FLOW_TLV_S_VER_1 */
|
||||
|
||||
#define IWL_FW_INI_MAX_REGION_ID 20
|
||||
#define IWL_FW_INI_MAX_NAME 32
|
||||
/**
|
||||
* struct iwl_fw_ini_region_cfg
|
||||
* @region_id: ID of this dump configuration
|
||||
* @region_type: &enum iwl_fw_ini_region_type
|
||||
* @num_regions: amount of regions in the address array.
|
||||
* @allocation_id: For DRAM type field substitutes for allocation_id.
|
||||
* @name_len: name length
|
||||
* @name: file name to use for this region
|
||||
* @size: size of the data, in bytes.(unused for IWL_FW_INI_REGION_DRAM_BUFFER)
|
||||
* @start_addr: array of addresses. (unused for IWL_FW_INI_REGION_DRAM_BUFFER)
|
||||
*/
|
||||
struct iwl_fw_ini_region_cfg {
|
||||
__le32 region_id;
|
||||
__le32 region_type;
|
||||
__le32 name_len;
|
||||
u8 name[IWL_FW_INI_MAX_NAME];
|
||||
union {
|
||||
__le32 num_regions;
|
||||
__le32 allocation_id;
|
||||
};
|
||||
__le32 size;
|
||||
__le32 start_addr[];
|
||||
} __packed; /* FW_INI_REGION_CONFIG_S */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG)
|
||||
* DUMP sections define IDs and triggers that use those IDs TLV
|
||||
* @header: header
|
||||
* @num_regions: how many different region section and IDs are coming next
|
||||
* @iwl_fw_ini_dump dump_config: list of dump configurations
|
||||
*/
|
||||
struct iwl_fw_ini_region_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
__le32 num_regions;
|
||||
struct iwl_fw_ini_region_cfg region_config[];
|
||||
} __packed; /* FW_INI_REGION_CFG_S */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG)
|
||||
* Region sections define IDs and triggers that use those IDs TLV
|
||||
*
|
||||
* @trigger_id: enum &iwl_fw_ini_tigger_id
|
||||
* @ignore_default: override FW TLV with binary TLV
|
||||
* @dump_delay: delay from trigger fire to dump, in usec
|
||||
* @occurrences: max amount of times to be fired
|
||||
* @ignore_consec: ignore consecutive triggers, in usec
|
||||
* @force_restart: force FW restart
|
||||
* @multi_dut: initiate debug dump data on several DUTs
|
||||
* @trigger_data: generic data to be utilized per trigger
|
||||
* @num_regions: number of dump regions defined for this trigger
|
||||
* @data: region IDs
|
||||
*/
|
||||
struct iwl_fw_ini_trigger {
|
||||
__le32 trigger_id;
|
||||
__le32 ignore_default;
|
||||
__le32 dump_delay;
|
||||
__le32 occurrences;
|
||||
__le32 ignore_consec;
|
||||
__le32 force_restart;
|
||||
__le32 multi_dut;
|
||||
__le32 trigger_data;
|
||||
__le32 num_regions;
|
||||
__le32 data[];
|
||||
} __packed; /* FW_INI_TRIGGER_CONFIG_S */
|
||||
|
||||
/**
|
||||
* struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG)
|
||||
* DUMP sections define IDs and triggers that use those IDs TLV
|
||||
*
|
||||
* @header: header
|
||||
* @num_triggers: how many different triggers section and IDs are coming next
|
||||
* @trigger_config: list of trigger configurations
|
||||
*/
|
||||
struct iwl_fw_ini_trigger_tlv {
|
||||
struct iwl_fw_ini_header header;
|
||||
__le32 num_triggers;
|
||||
struct iwl_fw_ini_trigger trigger_config[];
|
||||
} __packed; /* FW_INI_TRIGGER_CFG_S */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_trigger_id
|
||||
* @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert
|
||||
* @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang
|
||||
* @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert
|
||||
* @IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR: FW error notification
|
||||
* @IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING: FW warning notification
|
||||
* @IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO: FW info notification
|
||||
* @IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG: FW debug notification
|
||||
* @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger
|
||||
* @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity
|
||||
* @FW_DEBUG_TLV_TRIGGER_ID_HOST_DID_INITIATED_EVENT: undefined
|
||||
* @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency
|
||||
* threshold was crossed
|
||||
* @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed
|
||||
* @IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER: Deauth initiated by host
|
||||
* @IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST: stop GO request
|
||||
* @IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST: start GO request
|
||||
* @IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST: join P2P group request
|
||||
* @IWL_FW_TRIGGER_ID_HOST_SCAN_START: scan started event
|
||||
* @IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED: undefined
|
||||
* @IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS: undefined
|
||||
* @IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG: undefined
|
||||
* @IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED: BAR frame was received
|
||||
* @IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED: agg TX failed
|
||||
* @IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED: EAPOL TX failed
|
||||
* @IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED: suspicious TX response
|
||||
* @IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT: received suspicious auth
|
||||
* @IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE: roaming was completed
|
||||
* @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED: fast assoc failed
|
||||
* @IWL_FW_TRIGGER_ID_HOST_D3_START: D3 start
|
||||
* @IWL_FW_TRIGGER_ID_HOST_D3_END: D3 end
|
||||
* @IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS: missed beacon events
|
||||
* @IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS: P2P missed beacon events
|
||||
* @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES: undefined
|
||||
* @IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED: undefined
|
||||
* @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED: authentication / association
|
||||
* failed
|
||||
* @IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE: scan complete event
|
||||
* @IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT: scan abort complete
|
||||
* @IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE: nic alive message was received
|
||||
* @IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE: CSA was completed
|
||||
* @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs
|
||||
*/
|
||||
enum iwl_fw_ini_trigger_id {
|
||||
/* Errors triggers */
|
||||
IWL_FW_TRIGGER_ID_FW_ASSERT = 1,
|
||||
IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 2,
|
||||
IWL_FW_TRIGGER_ID_FW_HW_ERROR = 3,
|
||||
/* Generic triggers */
|
||||
IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR = 4,
|
||||
IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING = 5,
|
||||
IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO = 6,
|
||||
IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG = 7,
|
||||
/* User Trigger */
|
||||
IWL_FW_TRIGGER_ID_USER_TRIGGER = 8,
|
||||
/* Host triggers */
|
||||
IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 9,
|
||||
IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT = 10,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 11,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 12,
|
||||
IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 13,
|
||||
IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 14,
|
||||
IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 15,
|
||||
IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 16,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_START = 17,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_SUBITTED = 18,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 19,
|
||||
IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 20,
|
||||
IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 21,
|
||||
IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 22,
|
||||
IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 23,
|
||||
IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 24,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 25,
|
||||
IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 26,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 27,
|
||||
IWL_FW_TRIGGER_ID_HOST_D3_START = 28,
|
||||
IWL_FW_TRIGGER_ID_HOST_D3_END = 29,
|
||||
IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 30,
|
||||
IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 31,
|
||||
IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 32,
|
||||
IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 33,
|
||||
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 34,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 35,
|
||||
IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 36,
|
||||
IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 37,
|
||||
IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 38,
|
||||
IWL_FW_TRIGGER_ID_NUM,
|
||||
}; /* FW_INI_TRIGGER_ID_E_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_apply_point
|
||||
* @IWL_FW_INI_APPLY_INVALID: invalid
|
||||
* @IWL_FW_INI_APPLY_EARLY: pre loading FW
|
||||
* @IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive
|
||||
* @IWL_FW_INI_APPLY_POST_INIT: last cmd in initialization sequence
|
||||
* @IWL_FW_INI_APPLY_MISSED_BEACONS: missed beacons notification
|
||||
* @IWL_FW_INI_APPLY_SCAN_COMPLETE: scan completed
|
||||
* @IWL_FW_INI_APPLY_NUM: number of apply points
|
||||
*/
|
||||
enum iwl_fw_ini_apply_point {
|
||||
IWL_FW_INI_APPLY_INVALID,
|
||||
IWL_FW_INI_APPLY_EARLY,
|
||||
IWL_FW_INI_APPLY_AFTER_ALIVE,
|
||||
IWL_FW_INI_APPLY_POST_INIT,
|
||||
IWL_FW_INI_APPLY_MISSED_BEACONS,
|
||||
IWL_FW_INI_APPLY_SCAN_COMPLETE,
|
||||
IWL_FW_INI_APPLY_NUM,
|
||||
}; /* FW_INI_APPLY_POINT_E_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_allocation_id
|
||||
* @IWL_FW_INI_ALLOCATION_INVALID: invalid
|
||||
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
|
||||
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
|
||||
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
|
||||
* @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module
|
||||
* @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps
|
||||
* @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios
|
||||
*/
|
||||
enum iwl_fw_ini_allocation_id {
|
||||
IWL_FW_INI_ALLOCATION_INVALID,
|
||||
IWL_FW_INI_ALLOCATION_ID_DBGC1,
|
||||
IWL_FW_INI_ALLOCATION_ID_DBGC2,
|
||||
IWL_FW_INI_ALLOCATION_ID_DBGC3,
|
||||
IWL_FW_INI_ALLOCATION_ID_SDFX,
|
||||
IWL_FW_INI_ALLOCATION_ID_FW_DUMP,
|
||||
IWL_FW_INI_ALLOCATION_ID_USER_DEFINED,
|
||||
}; /* FW_INI_ALLOCATION_ID_E_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_buffer_location
|
||||
* @IWL_FW_INI_LOCATION_INVALID: invalid
|
||||
* @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location
|
||||
* @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location
|
||||
*/
|
||||
enum iwl_fw_ini_buffer_location {
|
||||
IWL_FW_INI_LOCATION_SRAM_INVALID,
|
||||
IWL_FW_INI_LOCATION_SRAM_PATH,
|
||||
IWL_FW_INI_LOCATION_DRAM_PATH,
|
||||
}; /* FW_INI_BUFFER_LOCATION_E_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_debug_flow
|
||||
* @IWL_FW_INI_DEBUG_INVALID: invalid
|
||||
* @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined
|
||||
* @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined
|
||||
*/
|
||||
enum iwl_fw_ini_debug_flow {
|
||||
IWL_FW_INI_DEBUG_INVALID,
|
||||
IWL_FW_INI_DEBUG_DBTR_FLOW,
|
||||
IWL_FW_INI_DEBUG_TB2DTF_FLOW,
|
||||
}; /* FW_INI_DEBUG_FLOW_E_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_fw_ini_region_type
|
||||
* @IWL_FW_INI_REGION_INVALID: invalid
|
||||
* @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory
|
||||
* @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC
|
||||
* @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY
|
||||
* @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX
|
||||
* @IWL_FW_INI_REGION_DRAM_BUFFER: DRAM buffer
|
||||
* @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
|
||||
* @IWL_FW_INI_REGION_INTERNAL_BUFFER: undefined
|
||||
* @IWL_FW_INI_REGION_TXF: TX fifos
|
||||
* @IWL_FW_INI_REGION_RXF: RX fifo
|
||||
* @IWL_FW_INI_REGION_PAGING: paging memory
|
||||
* @IWL_FW_INI_REGION_CSR: CSR registers
|
||||
* @IWL_FW_INI_REGION_NUM: number of region types
|
||||
*/
|
||||
enum iwl_fw_ini_region_type {
|
||||
IWL_FW_INI_REGION_INVALID,
|
||||
IWL_FW_INI_REGION_DEVICE_MEMORY,
|
||||
IWL_FW_INI_REGION_PERIPHERY_MAC,
|
||||
IWL_FW_INI_REGION_PERIPHERY_PHY,
|
||||
IWL_FW_INI_REGION_PERIPHERY_AUX,
|
||||
IWL_FW_INI_REGION_DRAM_BUFFER,
|
||||
IWL_FW_INI_REGION_DRAM_IMR,
|
||||
IWL_FW_INI_REGION_INTERNAL_BUFFER,
|
||||
IWL_FW_INI_REGION_TXF,
|
||||
IWL_FW_INI_REGION_RXF,
|
||||
IWL_FW_INI_REGION_PAGING,
|
||||
IWL_FW_INI_REGION_CSR,
|
||||
IWL_FW_INI_REGION_NUM
|
||||
}; /* FW_INI_REGION_TYPE_E_VER_1*/
|
||||
|
||||
#endif
|
@ -151,9 +151,9 @@ enum iwl_tsf_id {
|
|||
* @beacon_time: beacon transmit time in system time
|
||||
* @beacon_tsf: beacon transmit time in TSF
|
||||
* @bi: beacon interval in TU
|
||||
* @bi_reciprocal: 2^32 / bi
|
||||
* @reserved1: reserved
|
||||
* @dtim_interval: dtim transmit time in TU
|
||||
* @dtim_reciprocal: 2^32 / dtim_interval
|
||||
* @reserved2: reserved
|
||||
* @mcast_qid: queue ID for multicast traffic.
|
||||
* NOTE: obsolete from VER2 and on
|
||||
* @beacon_template: beacon template ID
|
||||
|
@ -162,9 +162,9 @@ struct iwl_mac_data_ap {
|
|||
__le32 beacon_time;
|
||||
__le64 beacon_tsf;
|
||||
__le32 bi;
|
||||
__le32 bi_reciprocal;
|
||||
__le32 reserved1;
|
||||
__le32 dtim_interval;
|
||||
__le32 dtim_reciprocal;
|
||||
__le32 reserved2;
|
||||
__le32 mcast_qid;
|
||||
__le32 beacon_template;
|
||||
} __packed; /* AP_MAC_DATA_API_S_VER_2 */
|
||||
|
@ -174,26 +174,34 @@ struct iwl_mac_data_ap {
|
|||
* @beacon_time: beacon transmit time in system time
|
||||
* @beacon_tsf: beacon transmit time in TSF
|
||||
* @bi: beacon interval in TU
|
||||
* @bi_reciprocal: 2^32 / bi
|
||||
* @reserved: reserved
|
||||
* @beacon_template: beacon template ID
|
||||
*/
|
||||
struct iwl_mac_data_ibss {
|
||||
__le32 beacon_time;
|
||||
__le64 beacon_tsf;
|
||||
__le32 bi;
|
||||
__le32 bi_reciprocal;
|
||||
__le32 reserved;
|
||||
__le32 beacon_template;
|
||||
} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
|
||||
|
||||
/**
|
||||
* enum iwl_mac_data_policy - policy of the data path for this MAC
|
||||
* @TWT_SUPPORTED: twt is supported
|
||||
*/
|
||||
enum iwl_mac_data_policy {
|
||||
TWT_SUPPORTED = BIT(0),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct iwl_mac_data_sta - configuration data for station MAC context
|
||||
* @is_assoc: 1 for associated state, 0 otherwise
|
||||
* @dtim_time: DTIM arrival time in system time
|
||||
* @dtim_tsf: DTIM arrival time in TSF
|
||||
* @bi: beacon interval in TU, applicable only when associated
|
||||
* @bi_reciprocal: 2^32 / bi , applicable only when associated
|
||||
* @reserved1: reserved
|
||||
* @dtim_interval: DTIM interval in TU, applicable only when associated
|
||||
* @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated
|
||||
* @data_policy: see &enum iwl_mac_data_policy
|
||||
* @listen_interval: in beacon intervals, applicable only when associated
|
||||
* @assoc_id: unique ID assigned by the AP during association
|
||||
* @assoc_beacon_arrive_time: TSF of first beacon after association
|
||||
|
@ -203,13 +211,13 @@ struct iwl_mac_data_sta {
|
|||
__le32 dtim_time;
|
||||
__le64 dtim_tsf;
|
||||
__le32 bi;
|
||||
__le32 bi_reciprocal;
|
||||
__le32 reserved1;
|
||||
__le32 dtim_interval;
|
||||
__le32 dtim_reciprocal;
|
||||
__le32 data_policy;
|
||||
__le32 listen_interval;
|
||||
__le32 assoc_id;
|
||||
__le32 assoc_beacon_arrive_time;
|
||||
} __packed; /* STA_MAC_DATA_API_S_VER_1 */
|
||||
} __packed; /* STA_MAC_DATA_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_mac_data_go - configuration data for P2P GO MAC context
|
||||
|
@ -233,7 +241,7 @@ struct iwl_mac_data_go {
|
|||
struct iwl_mac_data_p2p_sta {
|
||||
struct iwl_mac_data_sta sta;
|
||||
__le32 ctwin;
|
||||
} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
|
||||
} __packed; /* P2P_STA_MAC_DATA_API_S_VER_2 */
|
||||
|
||||
/**
|
||||
* struct iwl_mac_data_pibss - Pseudo IBSS config data
|
||||
|
@ -378,13 +386,6 @@ struct iwl_mac_ctx_cmd {
|
|||
};
|
||||
} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
|
||||
|
||||
static inline u32 iwl_mvm_reciprocal(u32 v)
|
||||
{
|
||||
if (!v)
|
||||
return 0;
|
||||
return 0xFFFFFFFF / v;
|
||||
}
|
||||
|
||||
#define IWL_NONQOS_SEQ_GET 0x1
|
||||
#define IWL_NONQOS_SEQ_SET 0x2
|
||||
struct iwl_nonqos_seq_query_cmd {
|
||||
|
|
@ -225,22 +225,18 @@ static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
|
|||
*dump_data = iwl_fw_error_next_data(*dump_data);
|
||||
}
|
||||
|
||||
static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_error_dump_data **dump_data)
|
||||
static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_error_dump_data **dump_data)
|
||||
{
|
||||
struct iwl_fw_error_dump_fifo *fifo_hdr;
|
||||
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
|
||||
u32 *fifo_data;
|
||||
u32 fifo_len;
|
||||
unsigned long flags;
|
||||
int i, j;
|
||||
|
||||
IWL_DEBUG_INFO(fwrt, "WRT FIFO dump\n");
|
||||
IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n");
|
||||
|
||||
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
|
||||
return;
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) {
|
||||
/* Pull RXF1 */
|
||||
iwl_fwrt_dump_rxf(fwrt, dump_data,
|
||||
cfg->lmac[0].rxfifo1_size, 0, 0);
|
||||
|
@ -254,7 +250,25 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
|
|||
LMAC2_PRPH_OFFSET, 2);
|
||||
}
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
|
||||
iwl_trans_release_nic_access(fwrt->trans, &flags);
|
||||
}
|
||||
|
||||
static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_error_dump_data **dump_data)
|
||||
{
|
||||
struct iwl_fw_error_dump_fifo *fifo_hdr;
|
||||
struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
|
||||
u32 *fifo_data;
|
||||
u32 fifo_len;
|
||||
unsigned long flags;
|
||||
int i, j;
|
||||
|
||||
IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n");
|
||||
|
||||
if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
|
||||
return;
|
||||
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) {
|
||||
/* Pull TXF data from LMAC1 */
|
||||
for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
|
||||
/* Mark the number of TXF we're pulling now */
|
||||
|
@ -279,7 +293,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
|
|||
}
|
||||
}
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
|
||||
fw_has_capa(&fwrt->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
|
||||
/* Pull UMAC internal TXF data from all TXFs */
|
||||
|
@ -595,16 +609,16 @@ static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
|
|||
do {size_t item = item_len; len += (!!item) * const_len + item; } \
|
||||
while (0)
|
||||
|
||||
static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fwrt_shared_mem_cfg *mem_cfg)
|
||||
static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fwrt_shared_mem_cfg *mem_cfg)
|
||||
{
|
||||
size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
|
||||
sizeof(struct iwl_fw_error_dump_fifo);
|
||||
u32 fifo_len = 0;
|
||||
int i;
|
||||
|
||||
if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)))
|
||||
goto dump_txf;
|
||||
if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF))
|
||||
return 0;
|
||||
|
||||
/* Count RXF2 size */
|
||||
ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len);
|
||||
|
@ -613,8 +627,18 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
|
|||
for (i = 0; i < mem_cfg->num_lmacs; i++)
|
||||
ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len);
|
||||
|
||||
dump_txf:
|
||||
if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)))
|
||||
return fifo_len;
|
||||
}
|
||||
|
||||
static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fwrt_shared_mem_cfg *mem_cfg)
|
||||
{
|
||||
size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) +
|
||||
sizeof(struct iwl_fw_error_dump_fifo);
|
||||
u32 fifo_len = 0;
|
||||
int i;
|
||||
|
||||
if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF))
|
||||
goto dump_internal_txf;
|
||||
|
||||
/* Count TXF sizes */
|
||||
|
@ -627,7 +651,7 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
|
|||
}
|
||||
|
||||
dump_internal_txf:
|
||||
if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
|
||||
if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
|
||||
fw_has_capa(&fwrt->fw->ucode_capa,
|
||||
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)))
|
||||
goto out;
|
||||
|
@ -639,6 +663,32 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt,
|
|||
return fifo_len;
|
||||
}
|
||||
|
||||
static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_error_dump_data **data)
|
||||
{
|
||||
int i;
|
||||
|
||||
IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
|
||||
for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
|
||||
struct iwl_fw_error_dump_paging *paging;
|
||||
struct page *pages =
|
||||
fwrt->fw_paging_db[i].fw_paging_block;
|
||||
dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
|
||||
|
||||
(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
|
||||
(*data)->len = cpu_to_le32(sizeof(*paging) +
|
||||
PAGING_BLOCK_SIZE);
|
||||
paging = (void *)(*data)->data;
|
||||
paging->index = cpu_to_le32(i);
|
||||
dma_sync_single_for_cpu(fwrt->trans->dev, addr,
|
||||
PAGING_BLOCK_SIZE,
|
||||
DMA_BIDIRECTIONAL);
|
||||
memcpy(paging->data, page_address(pages),
|
||||
PAGING_BLOCK_SIZE);
|
||||
(*data) = iwl_fw_error_next_data(*data);
|
||||
}
|
||||
}
|
||||
|
||||
static struct iwl_fw_error_dump_file *
|
||||
_iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_dump_ptrs *fw_error_dump)
|
||||
|
@ -655,13 +705,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
|
||||
u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ?
|
||||
0 : fwrt->trans->cfg->dccm2_len;
|
||||
bool monitor_dump_only = false;
|
||||
int i;
|
||||
|
||||
if (fwrt->dump.trig &&
|
||||
fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
|
||||
monitor_dump_only = true;
|
||||
|
||||
/* SRAM - include stack CCM if driver knows the values for it */
|
||||
if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
|
||||
const struct fw_img *img;
|
||||
|
@ -676,26 +721,27 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
|
||||
/* reading RXF/TXF sizes */
|
||||
if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
|
||||
fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg);
|
||||
fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg);
|
||||
fifo_len += iwl_fw_txf_len(fwrt, mem_cfg);
|
||||
|
||||
/* Make room for PRPH registers */
|
||||
if (!fwrt->trans->cfg->gen2 &&
|
||||
fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH))
|
||||
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH))
|
||||
prph_len += iwl_fw_get_prph_len(fwrt);
|
||||
|
||||
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
|
||||
fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
|
||||
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG))
|
||||
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
|
||||
}
|
||||
|
||||
file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len;
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO))
|
||||
file_len += sizeof(*dump_data) + sizeof(*dump_info);
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG))
|
||||
file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
|
||||
size_t hdr_len = sizeof(*dump_data) +
|
||||
sizeof(struct iwl_fw_error_dump_mem);
|
||||
|
||||
|
@ -712,10 +758,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
}
|
||||
|
||||
/* Make room for fw's virtual image pages, if it exists */
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
|
||||
!fwrt->trans->cfg->gen2 &&
|
||||
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
|
||||
fwrt->fw_paging_db[0].fw_paging_block)
|
||||
if (iwl_fw_dbg_is_paging_enabled(fwrt))
|
||||
file_len += fwrt->num_of_paging_blk *
|
||||
(sizeof(*dump_data) +
|
||||
sizeof(struct iwl_fw_error_dump_paging) +
|
||||
|
@ -727,12 +770,12 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
}
|
||||
|
||||
/* If we only want a monitor dump, reset the file length */
|
||||
if (monitor_dump_only) {
|
||||
if (fwrt->dump.monitor_only) {
|
||||
file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
|
||||
sizeof(*dump_info) + sizeof(*dump_smem_cfg);
|
||||
}
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
|
||||
fwrt->dump.desc)
|
||||
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
|
||||
fwrt->dump.desc->len;
|
||||
|
@ -746,7 +789,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
|
||||
dump_data = (void *)dump_file->data;
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
|
||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
|
||||
dump_data->len = cpu_to_le32(sizeof(*dump_info));
|
||||
dump_info = (void *)dump_data->data;
|
||||
|
@ -767,7 +810,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
dump_data = iwl_fw_error_next_data(dump_data);
|
||||
}
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) {
|
||||
/* Dump shared memory configuration */
|
||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
|
||||
dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
|
||||
|
@ -799,12 +842,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
|
||||
/* We only dump the FIFOs if the FW is in error state */
|
||||
if (fifo_len) {
|
||||
iwl_fw_dump_fifos(fwrt, &dump_data);
|
||||
iwl_fw_dump_rxf(fwrt, &dump_data);
|
||||
iwl_fw_dump_txf(fwrt, &dump_data);
|
||||
if (radio_len)
|
||||
iwl_read_radio_regs(fwrt, &dump_data);
|
||||
}
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) &&
|
||||
fwrt->dump.desc) {
|
||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
|
||||
dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
|
||||
|
@ -817,10 +861,10 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
}
|
||||
|
||||
/* In case we only want monitor dump, skip to dump trasport data */
|
||||
if (monitor_dump_only)
|
||||
if (fwrt->dump.monitor_only)
|
||||
goto out;
|
||||
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
|
||||
if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) {
|
||||
const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem =
|
||||
fwrt->fw->dbg.mem_tlv;
|
||||
|
||||
|
@ -865,30 +909,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
|
|||
}
|
||||
|
||||
/* Dump fw's virtual image */
|
||||
if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
|
||||
!fwrt->trans->cfg->gen2 &&
|
||||
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
|
||||
fwrt->fw_paging_db[0].fw_paging_block) {
|
||||
IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
|
||||
for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
|
||||
struct iwl_fw_error_dump_paging *paging;
|
||||
struct page *pages =
|
||||
fwrt->fw_paging_db[i].fw_paging_block;
|
||||
dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
|
||||
|
||||
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
|
||||
dump_data->len = cpu_to_le32(sizeof(*paging) +
|
||||
PAGING_BLOCK_SIZE);
|
||||
paging = (void *)dump_data->data;
|
||||
paging->index = cpu_to_le32(i);
|
||||
dma_sync_single_for_cpu(fwrt->trans->dev, addr,
|
||||
PAGING_BLOCK_SIZE,
|
||||
DMA_BIDIRECTIONAL);
|
||||
memcpy(paging->data, page_address(pages),
|
||||
PAGING_BLOCK_SIZE);
|
||||
dump_data = iwl_fw_error_next_data(dump_data);
|
||||
}
|
||||
}
|
||||
if (iwl_fw_dbg_is_paging_enabled(fwrt))
|
||||
iwl_dump_paging(fwrt, &dump_data);
|
||||
|
||||
if (prph_len) {
|
||||
iwl_dump_prph(fwrt->trans, &dump_data,
|
||||
|
@ -912,6 +934,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||
struct iwl_fw_error_dump_file *dump_file;
|
||||
struct scatterlist *sg_dump_data;
|
||||
u32 file_len;
|
||||
u32 dump_mask = fwrt->fw->dbg.dump_mask;
|
||||
|
||||
IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
|
||||
|
||||
|
@ -931,8 +954,10 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
|
|||
goto out;
|
||||
}
|
||||
|
||||
fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
|
||||
fwrt->dump.trig);
|
||||
if (fwrt->dump.monitor_only)
|
||||
dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;
|
||||
|
||||
fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
|
||||
file_len = le32_to_cpu(dump_file->file_len);
|
||||
fw_error_dump->fwrt_len = file_len;
|
||||
if (fw_error_dump->trans_ptr) {
|
||||
|
@ -973,6 +998,14 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
|
|||
};
|
||||
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
|
||||
|
||||
void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
IWL_INFO(fwrt, "error dump due to fw assert\n");
|
||||
fwrt->dump.desc = &iwl_dump_desc_assert;
|
||||
iwl_fw_error_dump(fwrt);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_assert_error_dump);
|
||||
|
||||
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
|
||||
{
|
||||
struct iwl_fw_dump_desc *iwl_dump_desc_no_alive =
|
||||
|
@ -998,7 +1031,8 @@ void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt)
|
|||
IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump);
|
||||
|
||||
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
|
||||
const struct iwl_fw_dump_desc *desc, void *trigger,
|
||||
const struct iwl_fw_dump_desc *desc,
|
||||
bool monitor_only,
|
||||
unsigned int delay)
|
||||
{
|
||||
/*
|
||||
|
@ -1028,7 +1062,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
|
|||
le32_to_cpu(desc->trig_desc.type));
|
||||
|
||||
fwrt->dump.desc = desc;
|
||||
fwrt->dump.trig = trigger;
|
||||
fwrt->dump.monitor_only = monitor_only;
|
||||
|
||||
schedule_delayed_work(&fwrt->dump.wk, delay);
|
||||
|
||||
|
@ -1043,6 +1077,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
|||
{
|
||||
struct iwl_fw_dump_desc *desc;
|
||||
unsigned int delay = 0;
|
||||
bool monitor_only = false;
|
||||
|
||||
if (trigger) {
|
||||
u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;
|
||||
|
@ -1059,6 +1094,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
|||
|
||||
trigger->occurrences = cpu_to_le16(occurrences);
|
||||
delay = le16_to_cpu(trigger->trig_dis_ms);
|
||||
monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;
|
||||
}
|
||||
|
||||
desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
|
||||
|
@ -1070,7 +1106,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
|
|||
desc->trig_desc.type = cpu_to_le32(trig);
|
||||
memcpy(desc->trig_desc.data, str, len);
|
||||
|
||||
return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay);
|
||||
return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
|
||||
|
||||
|
@ -1081,6 +1117,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
|
|||
int ret, len = 0;
|
||||
char buf[64];
|
||||
|
||||
if (fwrt->trans->ini_valid)
|
||||
return 0;
|
||||
|
||||
if (fmt) {
|
||||
va_list ap;
|
||||
|
||||
|
@ -1224,3 +1263,210 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
|
|||
cfg->d3_debug_data_length);
|
||||
}
|
||||
IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
|
||||
|
||||
static void
|
||||
iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_allocation_tlv *alloc)
|
||||
{
|
||||
struct iwl_trans *trans = fwrt->trans;
|
||||
struct iwl_continuous_record_cmd cont_rec = {};
|
||||
struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0];
|
||||
struct iwl_host_cmd hcmd = {
|
||||
.id = LDBG_CONFIG_CMD,
|
||||
.flags = CMD_ASYNC,
|
||||
.data[0] = &cont_rec,
|
||||
.len[0] = sizeof(cont_rec),
|
||||
};
|
||||
void *virtual_addr = NULL;
|
||||
u32 size = le32_to_cpu(alloc->size);
|
||||
dma_addr_t phys_addr;
|
||||
|
||||
cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION);
|
||||
|
||||
if (!trans->num_blocks &&
|
||||
le32_to_cpu(alloc->buffer_location) !=
|
||||
IWL_FW_INI_LOCATION_DRAM_PATH)
|
||||
return;
|
||||
|
||||
virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size,
|
||||
&phys_addr, GFP_KERNEL);
|
||||
|
||||
/* TODO: alloc fragments if needed */
|
||||
if (!virtual_addr)
|
||||
IWL_ERR(fwrt, "Failed to allocate debug memory\n");
|
||||
|
||||
if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon)))
|
||||
return;
|
||||
|
||||
trans->fw_mon[trans->num_blocks].block = virtual_addr;
|
||||
trans->fw_mon[trans->num_blocks].physical = phys_addr;
|
||||
trans->fw_mon[trans->num_blocks].size = size;
|
||||
trans->num_blocks++;
|
||||
|
||||
IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size);
|
||||
|
||||
/* First block is assigned via registers / context info */
|
||||
if (trans->num_blocks == 1)
|
||||
return;
|
||||
|
||||
cmd->num_frags = cpu_to_le32(1);
|
||||
cmd->fragments[0].address = cpu_to_le64(phys_addr);
|
||||
cmd->fragments[0].size = alloc->size;
|
||||
cmd->allocation_id = alloc->allocation_id;
|
||||
cmd->buffer_location = alloc->buffer_location;
|
||||
|
||||
iwl_trans_send_cmd(trans, &hcmd);
|
||||
}
|
||||
|
||||
static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_ucode_tlv *tlv)
|
||||
{
|
||||
struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0];
|
||||
struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd;
|
||||
u16 len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv);
|
||||
|
||||
struct iwl_host_cmd hcmd = {
|
||||
.id = WIDE_ID(data->group, data->id),
|
||||
.len = { len, },
|
||||
.data = { data->data, },
|
||||
};
|
||||
|
||||
iwl_trans_send_cmd(fwrt->trans, &hcmd);
|
||||
}
|
||||
|
||||
static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt,
|
||||
struct iwl_fw_ini_region_tlv *tlv,
|
||||
bool ext, enum iwl_fw_ini_apply_point pnt)
|
||||
{
|
||||
void *iter = (void *)tlv->region_config;
|
||||
int i, size = le32_to_cpu(tlv->num_regions);
|
||||
|
||||
for (i = 0; i < size; i++) {
|
||||
struct iwl_fw_ini_region_cfg *reg = iter;
|
||||
int id = le32_to_cpu(reg->region_id);
|
||||
struct iwl_fw_ini_active_regs *active;
|
||||
|
||||
if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_regs)))
break;

active = &fwrt->dump.active_regs[id];

if (ext && active->apply_point == pnt)
IWL_WARN(fwrt->trans,
"External region TLV overrides FW default %x\n",
id);

IWL_DEBUG_FW(fwrt,
"%s: apply point %d, activating region ID %d\n",
__func__, pnt, id);

active->reg = reg;
active->apply_point = pnt;

if (le32_to_cpu(reg->region_type) !=
IWL_FW_INI_REGION_DRAM_BUFFER)
iter += le32_to_cpu(reg->num_regions) * sizeof(__le32);

iter += sizeof(*reg);
}
}

static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_trigger_tlv *tlv,
bool ext,
enum iwl_fw_ini_apply_point apply_point)
{
int i, size = le32_to_cpu(tlv->num_triggers);
void *iter = (void *)tlv->trigger_config;

for (i = 0; i < size; i++) {
struct iwl_fw_ini_trigger *trig = iter;
struct iwl_fw_ini_active_triggers *active;
int id = le32_to_cpu(trig->trigger_id);
u32 num;

if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs)))
break;

active = &fwrt->dump.active_trigs[id];

if (active->apply_point != apply_point) {
active->conf = NULL;
active->conf_ext = NULL;
}

num = le32_to_cpu(trig->num_regions);

if (ext && active->apply_point == apply_point) {
num += le32_to_cpu(active->conf->num_regions);
if (trig->ignore_default) {
active->conf_ext = active->conf;
active->conf = trig;
} else {
active->conf_ext = trig;
}
} else {
active->conf = trig;
}

iter += sizeof(*trig) +
le32_to_cpu(trig->num_regions) * sizeof(__le32);

active->active = num;
active->apply_point = apply_point;
}
}

static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
struct iwl_apply_point_data *data,
enum iwl_fw_ini_apply_point pnt,
bool ext)
{
void *iter = data->data;

while (iter && iter < data->data + data->size) {
struct iwl_ucode_tlv *tlv = iter;
void *ini_tlv = (void *)tlv->data;
u32 type = le32_to_cpu(tlv->type);

switch (type) {
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv);
break;
case IWL_UCODE_TLV_TYPE_HCMD:
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
IWL_ERR(fwrt,
"Invalid apply point %x for host command\n",
pnt);
goto next;
}
iwl_fw_dbg_send_hcmd(fwrt, tlv);
break;
case IWL_UCODE_TLV_TYPE_REGIONS:
iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt);
break;
case IWL_UCODE_TLV_TYPE_TRIGGERS:
iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt);
break;
case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
break;
default:
WARN_ONCE(1, "Invalid TLV %x for apply point\n", type);
break;
}
next:
iter += sizeof(*tlv) + le32_to_cpu(tlv->length);
}
}

void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_apply_point apply_point)
{
void *data = &fwrt->trans->apply_points[apply_point];

_iwl_fw_dbg_apply_point(fwrt, data, apply_point, false);

data = &fwrt->trans->apply_points_ext[apply_point];
_iwl_fw_dbg_apply_point(fwrt, data, apply_point, true);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
@ -72,6 +72,7 @@
#include "file.h"
#include "error-dump.h"
#include "api/commands.h"
#include "api/dbg-tlv.h"

/**
* struct iwl_fw_dump_desc - describes the dump

@ -101,13 +102,12 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
if (fwrt->dump.desc != &iwl_dump_desc_assert)
kfree(fwrt->dump.desc);
fwrt->dump.desc = NULL;
fwrt->dump.trig = NULL;
}

void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
const struct iwl_fw_dump_desc *desc,
void *trigger, unsigned int delay);
bool monitor_only, unsigned int delay);
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
enum iwl_fw_dbg_trigger trig,
const char *str, size_t len,

@ -193,6 +193,9 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_dbg_trigger_tlv *trig;

if (fwrt->trans->ini_valid)
return NULL;

if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id))
return NULL;

@ -263,6 +266,9 @@ _iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
udelay(100);
iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
#ifdef CONFIG_IWLWIFI_DEBUGFS
trans->dbg_rec_on = false;
#endif
}

static inline void

@ -293,6 +299,14 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt)
{
if (fwrt->fw->dbg.dest_tlv && fwrt->cur_fw_img == IWL_UCODE_REGULAR)
fwrt->trans->dbg_rec_on = true;
}
#endif

static inline void
iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt,
struct iwl_fw_dbg_params *params)

@ -301,6 +315,9 @@ iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt,
_iwl_fw_dbg_restart_recording(fwrt->trans, params);
else
iwl_fw_dbg_start_stop_hcmd(fwrt, true);
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_fw_set_dbg_rec_on(fwrt);
#endif
}

static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)

@ -310,12 +327,25 @@ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)

void iwl_fw_error_dump_wk(struct work_struct *work);

static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type)
{
return (fwrt->fw->dbg.dump_mask & BIT(type));
}

static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt)
{
return fw_has_capa(&fwrt->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D3_DEBUG) &&
fwrt->trans->cfg->d3_debug_data_length &&
fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA);
}

static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt)
{
return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) &&
!fwrt->trans->cfg->gen2 &&
fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
fwrt->fw_paging_db[0].fw_paging_block;
}

void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt);

@ -366,6 +396,10 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}

#endif /* CONFIG_IWLWIFI_DEBUGFS */

void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt);
void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt);
void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
enum iwl_fw_ini_apply_point apply_point);

#endif /* __iwl_fw_dbg_h__ */
@ -91,6 +91,8 @@ struct iwl_ucode_header {
} u;
};

#define IWL_UCODE_INI_TLV_GROUP BIT(24)

/*
* new TLV uCode file layout
*

@ -141,6 +143,11 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
IWL_UCODE_TLV_FW_MEM_SEG = 51,
IWL_UCODE_TLV_IML = 52,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1,
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2,
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3,
IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4,
IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5,

/* TLVs 0x1000-0x2000 are for internal driver usage */
IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000,
@ -65,6 +65,8 @@
#define __iwl_fw_img_h__
#include <linux/types.h>

#include "api/dbg-tlv.h"

#include "file.h"
#include "error-dump.h"

@ -220,6 +222,30 @@ struct iwl_fw_dbg {
u32 dump_mask;
};

/**
* struct iwl_fw_ini_active_triggers
* @active: is this trigger active
* @apply_point: last apply point that updated this trigger
* @conf: active trigger
* @conf_ext: second trigger, contains extra regions to dump
*/
struct iwl_fw_ini_active_triggers {
bool active;
enum iwl_fw_ini_apply_point apply_point;
struct iwl_fw_ini_trigger *conf;
struct iwl_fw_ini_trigger *conf_ext;
};

/**
* struct iwl_fw_ini_active_regs
* @reg: active region from TLV
* @apply_point: apply point where it became active
*/
struct iwl_fw_ini_active_regs {
struct iwl_fw_ini_region_cfg *reg;
enum iwl_fw_ini_apply_point apply_point;
};

/**
* struct iwl_fw - variables associated with the firmware
*
@ -64,6 +64,7 @@
#include "iwl-trans.h"
#include "img.h"
#include "fw/api/debug.h"
#include "fw/api/dbg-tlv.h"
#include "fw/api/paging.h"
#include "iwl-eeprom-parse.h"

@ -131,7 +132,7 @@ struct iwl_fw_runtime {
/* debug */
struct {
const struct iwl_fw_dump_desc *desc;
const struct iwl_fw_dbg_trigger_tlv *trig;
bool monitor_only;
struct delayed_work wk;

u8 conf;

@ -139,6 +140,8 @@ struct iwl_fw_runtime {
/* ts of the beginning of a non-collect fw dbg data period */
unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
u32 *d3_debug_data;
struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID];
struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM];
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
@ -0,0 +1,230 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/

#include <linux/firmware.h>
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"

void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext)
{
struct iwl_apply_point_data *data;
struct iwl_fw_ini_header *header = (void *)&tlv->data[0];
u32 apply_point = le32_to_cpu(header->apply_point);

int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv);

if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM,
"Invalid apply point id %d\n", apply_point))
return;

if (ext)
data = &trans->apply_points_ext[apply_point];
else
data = &trans->apply_points[apply_point];

/*
* Make sure we still have room to copy this TLV. Offset points to the
* location the last copy ended.
*/
if (WARN_ONCE(data->offset + copy_size > data->size,
"Not enough memory for apply point %d\n",
apply_point))
return;

memcpy(data->data + data->offset, (void *)tlv, copy_size);
data->offset += copy_size;
}

void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
bool ext)
{
struct iwl_ucode_tlv *tlv;
u32 size[IWL_FW_INI_APPLY_NUM] = {0};
int i;

while (len >= sizeof(*tlv)) {
u32 tlv_len, tlv_type, apply;
struct iwl_fw_ini_header *hdr;

len -= sizeof(*tlv);
tlv = (void *)data;

tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);

if (len < tlv_len)
return;

len -= ALIGN(tlv_len, 4);
data += sizeof(*tlv) + ALIGN(tlv_len, 4);

if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP))
continue;

hdr = (void *)&tlv->data[0];
apply = le32_to_cpu(hdr->apply_point);

IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n",
le32_to_cpu(tlv->type), apply);

if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM))
continue;

size[apply] += sizeof(*tlv) + tlv_len;
}

for (i = 0; i < ARRAY_SIZE(size); i++) {
void *mem;

if (!size[i])
continue;

mem = kzalloc(size[i], GFP_KERNEL);

if (!mem) {
IWL_ERR(trans, "No memory for apply point %d\n", i);
return;
}

if (ext) {
trans->apply_points_ext[i].data = mem;
trans->apply_points_ext[i].size = size[i];
} else {
trans->apply_points[i].data = mem;
trans->apply_points[i].size = size[i];
}

trans->ini_valid = true;
}
}

void iwl_fw_dbg_free(struct iwl_trans *trans)
{
int i;

for (i = 0; i < ARRAY_SIZE(trans->apply_points); i++) {
kfree(trans->apply_points[i].data);
trans->apply_points[i].size = 0;
trans->apply_points[i].offset = 0;

kfree(trans->apply_points_ext[i].data);
trans->apply_points_ext[i].size = 0;
trans->apply_points_ext[i].offset = 0;
}
}

static int iwl_parse_fw_dbg_tlv(struct iwl_trans *trans, const u8 *data,
size_t len)
{
struct iwl_ucode_tlv *tlv;
enum iwl_ucode_tlv_type tlv_type;
u32 tlv_len;

while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
tlv = (void *)data;

tlv_len = le32_to_cpu(tlv->length);
tlv_type = le32_to_cpu(tlv->type);

if (len < tlv_len) {
IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
len, tlv_len);
return -EINVAL;
}
len -= ALIGN(tlv_len, 4);
data += sizeof(*tlv) + ALIGN(tlv_len, 4);

switch (tlv_type) {
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
case IWL_UCODE_TLV_TYPE_REGIONS:
case IWL_UCODE_TLV_TYPE_TRIGGERS:
case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
iwl_fw_dbg_copy_tlv(trans, tlv, true);
default:
WARN_ONCE(1, "Invalid TLV %x\n", tlv_type);
break;
}
}

return 0;
}

void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans)
{
const struct firmware *fw;
int res;

if (trans->external_ini_loaded || !iwlwifi_mod_params.enable_ini)
return;

res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev);
if (res)
return;

iwl_alloc_dbg_tlv(trans, fw->size, fw->data, true);
iwl_parse_fw_dbg_tlv(trans, fw->data, fw->size);

trans->external_ini_loaded = true;
release_firmware(fw);
}
@ -0,0 +1,87 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_dbg_tlv_h__
#define __iwl_dbg_tlv_h__

#include <linux/device.h>
#include <linux/types.h>

/**
* struct iwl_apply_point_data
* @data: start address of this apply point data
* @size total size of the data
* @offset: current offset of the copied data
*/
struct iwl_apply_point_data {
void *data;
int size;
int offset;
};

struct iwl_trans;
void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans);
void iwl_fw_dbg_free(struct iwl_trans *trans);
void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
bool ext);
void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data,
bool ext);

#endif /* __iwl_dbg_tlv_h__*/
@ -72,6 +72,7 @@
#include "iwl-op-mode.h"
#include "iwl-agn-hw.h"
#include "fw/img.h"
#include "iwl-dbg-tlv.h"
#include "iwl-config.h"
#include "iwl-modparams.h"

@ -645,6 +646,9 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,

len -= sizeof(*ucode);

if (iwlwifi_mod_params.enable_ini)
iwl_alloc_dbg_tlv(drv->trans, len, data, false);

while (len >= sizeof(*tlv)) {
len -= sizeof(*tlv);
tlv = (void *)data;

@ -1086,6 +1090,13 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -ENOMEM;
break;
}
case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
case IWL_UCODE_TLV_TYPE_HCMD:
case IWL_UCODE_TLV_TYPE_REGIONS:
case IWL_UCODE_TLV_TYPE_TRIGGERS:
case IWL_UCODE_TLV_TYPE_DEBUG_FLOW:
if (iwlwifi_mod_params.enable_ini)
iwl_fw_dbg_copy_tlv(drv->trans, tlv, false);
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;

@ -1565,7 +1576,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
if (!drv->dbgfs_drv) {
IWL_ERR(drv, "failed to create debugfs directory\n");
ret = -ENOMEM;
goto err_free_drv;
goto err_free_tlv;
}

/* Create transport layer debugfs dir */

@ -1590,7 +1601,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
#ifdef CONFIG_IWLWIFI_DEBUGFS
err_free_dbgfs:
debugfs_remove_recursive(drv->dbgfs_drv);
err_free_drv:
err_free_tlv:
iwl_fw_dbg_free(drv->trans);
#endif
kfree(drv);
err:

@ -1616,9 +1628,13 @@ void iwl_drv_stop(struct iwl_drv *drv)
mutex_unlock(&iwlwifi_opmode_table_mtx);

#ifdef CONFIG_IWLWIFI_DEBUGFS
drv->trans->ops->debugfs_cleanup(drv->trans);

debugfs_remove_recursive(drv->dbgfs_drv);
#endif

iwl_fw_dbg_free(drv->trans);

kfree(drv);
}

@ -1749,6 +1765,10 @@ MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
MODULE_PARM_DESC(uapsd_disable,
"disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
module_param_named(enable_ini, iwlwifi_mod_params.enable_ini,
bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_ini,
"Enable debug INI TLV FW debug infrastructure (default: 0");

/*
* set bt_coex_active to true, uCode will do kill/defer

@ -122,6 +122,7 @@ enum iwl_uapsd_disable {
* @fw_monitor: allow to use firmware monitor
* @disable_11ac: disable VHT capabilities, default = false.
* @remove_when_gone: remove an inaccessible device from the PCIe bus.
* @enable_ini: enable new FW debug infratructure (INI TLVs)
*/
struct iwl_mod_params {
int swcrypto;

@ -148,6 +149,7 @@ struct iwl_mod_params {
*/
bool disable_11ax;
bool remove_when_gone;
bool enable_ini;
};

#endif /* #__iwl_modparams_h__ */
@ -8,6 +8,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as

@ -30,6 +31,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright (C) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without

@ -394,6 +396,7 @@ enum aux_misc_master1_en {
#define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800
#define RSA_ENABLE 0xA24B08
#define PREG_AUX_BUS_WPROT_0 0xA04CC0
#define PREG_PRPH_WPROT_0 0xA04CE0
#define SB_CPU_1_STATUS 0xA01E30
#define SB_CPU_2_STATUS 0xA01E34
#define UMAG_SB_CPU_1_STATUS 0xA038C0

@ -420,4 +423,8 @@ enum {
#define UREG_CHICK (0xA05C00)
#define UREG_CHICK_MSI_ENABLE BIT(24)
#define UREG_CHICK_MSIX_ENABLE BIT(25)

#define HPM_DEBUG 0xA03440
#define PERSISTENCE_BIT BIT(12)
#define PREG_WFPM_ACCESS BIT(12)
#endif /* __iwl_prph_h__ */
@ -73,6 +73,8 @@
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
* DOC: Transport layer - what is it ?

@ -534,6 +536,8 @@ struct iwl_trans_rxq_dma_data {
* @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
* TX'ed commands and similar. The buffer will be vfree'd by the caller.
* Note that the transport must fill in the proper file headers.
* @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
* of the trans debugfs
*/
struct iwl_trans_ops {

@ -602,8 +606,8 @@ struct iwl_trans_ops {
void (*resume)(struct iwl_trans *trans);

struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
const struct iwl_fw_dbg_trigger_tlv
*trigger);
u32 dump_mask);
void (*debugfs_cleanup)(struct iwl_trans *trans);
};

/**

@ -679,7 +683,6 @@ enum iwl_plat_pm_mode {
* enter/exit (in msecs).
*/
#define IWL_TRANS_IDLE_TIMEOUT 2000
#define IWL_MAX_DEBUG_ALLOCATIONS 1

/**
* struct iwl_dram_data

@ -734,6 +737,7 @@ struct iwl_dram_data {
* @runtime_pm_mode: the runtime power management mode in use. This
* mode is set during the initialization phase and is not
* supposed to change during runtime.
* @dbg_rec_on: true iff there is a fw debug recording currently active
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;

@ -774,17 +778,23 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif

struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM];
struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM];

bool external_ini_loaded;
bool ini_valid;

const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u32 dbg_dump_mask;
u8 dbg_n_dest_reg;
int num_blocks;
struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS];
struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM];

enum iwl_plat_pm_mode system_pm_mode;
enum iwl_plat_pm_mode runtime_pm_mode;
bool suspending;
bool dbg_rec_on;

/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */

@ -897,12 +907,11 @@ static inline void iwl_trans_resume(struct iwl_trans *trans)
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans,
const struct iwl_fw_dbg_trigger_tlv *trigger)
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
if (!trans->ops->dump_data)
return NULL;
return trans->ops->dump_data(trans, trigger);
return trans->ops->dump_data(trans, dump_mask);
}

static inline struct iwl_device_cmd *
@ -1956,7 +1956,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
iwl_mvm_dump_nic_error_log(mvm);
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
NULL, 0);
false, 0);
ret = 1;
goto err;
}
@ -1299,10 +1299,11 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
int len;

len = scnprintf(buf, sizeof(buf) - 1,
"traffic=%d\ndbgfs=%d\nvcmd=%d\n",
"traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n",
!!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
!!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
!!(mvmvif->low_latency & LOW_LATENCY_VCMD));
!!(mvmvif->low_latency & LOW_LATENCY_VCMD),
!!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE));
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

@ -1440,15 +1441,6 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static const char * const chanwidths[] = {
[NL80211_CHAN_WIDTH_20_NOHT] = "noht",
[NL80211_CHAN_WIDTH_20] = "ht20",
[NL80211_CHAN_WIDTH_40] = "ht40",
[NL80211_CHAN_WIDTH_80] = "vht80",
[NL80211_CHAN_WIDTH_80P80] = "vht80p80",
[NL80211_CHAN_WIDTH_160] = "vht160",
};

#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@ -377,6 +377,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_fw_set_dbg_rec_on(&mvm->fwrt);
#endif
clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status);

return 0;

@ -407,6 +410,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
iwl_fw_assert_error_dump(&mvm->fwrt);
goto error;
}

@ -1036,6 +1040,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
iwl_fw_assert_error_dump(&mvm->fwrt);
goto error;
}
@ -767,13 +767,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
}

ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_sta->bi_reciprocal =
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period);
ctxt_sta->dtim_reciprocal =
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period));

ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);

@ -782,8 +777,30 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);

if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
!iwlwifi_mod_params.disable_11ax) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
u8 sta_id = mvmvif->ap_sta_id;

cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
if (sta_id != IWL_MVM_INVALID_STA) {
struct ieee80211_sta *sta;

sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));

/*
* TODO: we should check the ext cap IE but it is
* unclear why the spec requires two bits (one in HE
* cap IE, and one in the ext cap IE). In the meantime
* rely on the HE cap IE only.
*/
if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_TWT_RES))
ctxt_sta->data_policy |=
cpu_to_le32(TWT_SUPPORTED);
}
}

return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}

@ -832,8 +849,6 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,

/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */
cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
cmd.ibss.bi_reciprocal =
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));

/* TODO: Assumes that the beacon id == mac context id */
cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);

@ -965,11 +980,8 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
tx->tx_flags = cpu_to_le32(tx_flags);

if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
mvm->mgmt_last_antenna_idx =
iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);
}
IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION))
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

tx->rate_n_flags =
cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<

@ -1182,14 +1194,12 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
}

if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax)
cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);

ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
ctxt_ap->bi_reciprocal =
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period);
ctxt_ap->dtim_reciprocal =
cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
vif->bss_conf.dtim_period));

if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_STA_TYPE))
@ -423,6 +423,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);

if (iwl_mvm_has_tlc_offload(mvm)) {
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);

@ -813,6 +814,21 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
!ieee80211_is_bufferable_mmpdu(hdr->frame_control))
sta = NULL;

/* If there is no sta, and it's not offchannel - send through AP */
if (info->control.vif->type == NL80211_IFTYPE_STATION &&
info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info->control.vif);
u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);

if (ap_sta_id < IWL_MVM_STATION_COUNT) {
/* mac80211 holds rcu read lock */
sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
if (IS_ERR_OR_NULL(sta))
goto drop;
}
}

if (sta) {
if (iwl_mvm_defer_tx(mvm, sta, skb))
return;

@ -2383,6 +2399,12 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* must be set before quota calculations */
mvmvif->ap_ibss_active = true;

if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
iwl_mvm_vif_set_low_latency(mvmvif, true,
LOW_LATENCY_VIF_TYPE);
iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id);
}

/* power updated needs to be done before quotas */
iwl_mvm_power_update_mac(mvm);

@ -2445,6 +2467,12 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
mvmvif->ap_ibss_active = false;
mvm->ap_last_beacon_gp2 = 0;

if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) {
iwl_mvm_vif_set_low_latency(mvmvif, false,
LOW_LATENCY_VIF_TYPE);
iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id);
}

iwl_mvm_bt_coex_vif_change(mvm);

iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);

@ -2945,6 +2973,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_AP) {
mvmvif->ap_assoc_sta_count++;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id);
}

iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
@ -303,11 +303,13 @@ enum iwl_bt_force_ant_mode {
* @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
* @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
* @LOW_LATENCY_VCMD: low latency mode set from vendor command
* @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap)
*/
enum iwl_mvm_low_latency_cause {
LOW_LATENCY_TRAFFIC = BIT(0),
LOW_LATENCY_DEBUGFS = BIT(1),
LOW_LATENCY_VCMD = BIT(2),
LOW_LATENCY_VIF_TYPE = BIT(3),
};

/**

@ -844,7 +846,6 @@ struct iwl_mvm {
u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];

struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
struct work_struct add_stream_wk; /* To add streams to queues */

atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];

@ -1521,6 +1522,11 @@ static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
mvm->fw->valid_rx_ant;
}

static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
{
*ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant);
}

static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
{
u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |

@ -1846,6 +1852,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* get SystemLowLatencyMode - only needed for beacon threshold? */
bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency,
u16 mac_id);

/* get VMACLowLatencyMode */
static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
@ -676,7 +676,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->aux_roc_te_list);
INIT_LIST_HEAD(&mvm->async_handlers_list);
spin_lock_init(&mvm->time_event_lock);
spin_lock_init(&mvm->queue_info_lock);

INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);

@ -770,7 +769,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv,
sizeof(trans->dbg_conf_tlv));
trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv;
trans->dbg_dump_mask = mvm->fw->dbg.dump_mask;

trans->iml = mvm->fw->iml;
trans->iml_len = mvm->fw->iml_len;

@ -846,6 +844,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,

iwl_mvm_tof_init(mvm);

iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);

return op_mode;

out_unregister:

@ -1110,11 +1110,7 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
unsigned long mq;

spin_lock_bh(&mvm->queue_info_lock);
mq = mvm->hw_queue_to_mac80211[hw_queue];
spin_unlock_bh(&mvm->queue_info_lock);
unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];

iwl_mvm_stop_mac_queues(mvm, mq);
}

@ -1140,11 +1136,7 @@ void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
unsigned long mq;

spin_lock_bh(&mvm->queue_info_lock);
mq = mvm->hw_queue_to_mac80211[hw_queue];
spin_unlock_bh(&mvm->queue_info_lock);
unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue];

iwl_mvm_start_mac_queues(mvm, mq);
}

@ -1242,7 +1234,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
*/
if (!mvm->fw_restart && fw_error) {
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
NULL, 0);
false, 0);
} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
struct iwl_mvm_reprobe *reprobe;
@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
{
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u8 supp = 0;

if (he_cap && he_cap->has_he)
return 0;

if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
@ -1422,12 +1422,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
/* update aggregation data for monitor sake on default queue */
if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
u64 he_phy_data;

if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
else
he_phy_data = le64_to_cpu(desc->v1.he_phy_data);

rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->ampdu_reference = mvm->ampdu_ref;
@ -205,9 +205,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
{
u32 tx_ant;

mvm->scan_last_antenna_idx =
iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->scan_last_antenna_idx);
iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;

if (band == NL80211_BAND_2GHZ && !no_cck)
@ -319,9 +319,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
spin_unlock_bh(&mvm->queue_info_lock);

rcu_read_lock();

@ -372,25 +370,17 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
return -EINVAL;

if (iwl_mvm_has_new_tx_api(mvm)) {
spin_lock_bh(&mvm->queue_info_lock);

if (remove_mac_queue)
mvm->hw_queue_to_mac80211[queue] &=
~BIT(mac80211_queue);

spin_unlock_bh(&mvm->queue_info_lock);

iwl_trans_txq_free(mvm->trans, queue);

return 0;
}

spin_lock_bh(&mvm->queue_info_lock);

if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) {
spin_unlock_bh(&mvm->queue_info_lock);
if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
return 0;
}

mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

@ -426,10 +416,8 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
mvm->hw_queue_to_mac80211[queue]);

/* If the queue is still enabled - nothing left to do in this func */
if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
spin_unlock_bh(&mvm->queue_info_lock);
if (cmd.action == SCD_CFG_ENABLE_QUEUE)
return 0;
}

cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
cmd.tid = mvm->queue_info[queue].txq_tid;

@ -448,8 +436,6 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue,
/* Regardless if this is a reserved TXQ for a STA - mark it as false */
mvm->queue_info[queue].reserved = false;

spin_unlock_bh(&mvm->queue_info_lock);

iwl_trans_txq_disable(mvm->trans, queue, false);
ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

@ -474,10 +460,8 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);

sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));

@ -516,10 +500,8 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);

rcu_read_lock();

@ -545,6 +527,16 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)

rcu_read_unlock();

/*
* The TX path may have been using this TXQ_ID from the tid_data,
* so make sure it's no longer running so that we can safely reuse
* this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
* above, but nothing guarantees we've stopped using them. Thus,
* without this, we could get to iwl_mvm_disable_txq() and remove
* the queue while still sending frames to it.
*/
synchronize_net();

return disable_agg_tids;
}

@ -562,11 +554,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
sta_id = mvm->queue_info[queue].ra_sta_id;
tid = mvm->queue_info[queue].txq_tid;
spin_unlock_bh(&mvm->queue_info_lock);

same_sta = sta_id == new_sta_id;

@ -610,7 +600,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
* by the inactivity checker.
*/
lockdep_assert_held(&mvm->mutex);
lockdep_assert_held(&mvm->queue_info_lock);

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

@ -696,10 +685,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
* value 3 and VO with value 0, so to check if ac X is lower than ac Y
* we need to check if the numerical value of X is LARGER than of Y.
*/
spin_lock_bh(&mvm->queue_info_lock);
if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
spin_unlock_bh(&mvm->queue_info_lock);

IWL_DEBUG_TX_QUEUES(mvm,
"No redirection needed on TXQ #%d\n",
queue);

@ -711,7 +697,6 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.tid = mvm->queue_info[queue].txq_tid;
mq = mvm->hw_queue_to_mac80211[queue];
shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;
spin_unlock_bh(&mvm->queue_info_lock);

IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
queue, iwl_mvm_ac_to_tx_fifo[ac]);

@ -737,9 +722,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

/* Update the TID "owner" of the queue */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].txq_tid = tid;
spin_unlock_bh(&mvm->queue_info_lock);

/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

@ -748,9 +731,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

/* Update AC marking of the queue */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].mac80211_ac = ac;
spin_unlock_bh(&mvm->queue_info_lock);

/*
* Mark queue as shared in transport if shared

@ -773,7 +754,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
{
int i;

lockdep_assert_held(&mvm->queue_info_lock);
lockdep_assert_held(&mvm->mutex);

/* This should not be hit with new TX path */
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))

@ -853,11 +834,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
{
bool enable_queue = true;

spin_lock_bh(&mvm->queue_info_lock);

/* Make sure this TID isn't already enabled */
if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
spin_unlock_bh(&mvm->queue_info_lock);
IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
queue, tid);
return false;

@ -893,8 +871,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
queue, mvm->queue_info[queue].tid_bitmap,
mvm->hw_queue_to_mac80211[queue]);

spin_unlock_bh(&mvm->queue_info_lock);

return enable_queue;
}

@ -949,9 +925,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return;

spin_lock_bh(&mvm->queue_info_lock);
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);

if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
return;

@ -968,9 +942,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
return;
}

spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].txq_tid = tid;
spin_unlock_bh(&mvm->queue_info_lock);
IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
queue, tid);
}

@ -992,10 +964,8 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)

lockdep_assert_held(&mvm->mutex);

spin_lock_bh(&mvm->queue_info_lock);
sta_id = mvm->queue_info[queue].ra_sta_id;
tid_bitmap = mvm->queue_info[queue].tid_bitmap;
spin_unlock_bh(&mvm->queue_info_lock);

/* Find TID for queue, and make sure it is the only one on the queue */
tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);

@ -1052,9 +1022,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
}
}

spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
}

/*

@ -1073,7 +1041,7 @@ static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
int tid;

lockdep_assert_held(&mvmsta->lock);
lockdep_assert_held(&mvm->queue_info_lock);
lockdep_assert_held(&mvm->mutex);

if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return false;

@ -1174,8 +1142,6 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
if (iwl_mvm_has_new_tx_api(mvm))
return -ENOSPC;

spin_lock_bh(&mvm->queue_info_lock);

rcu_read_lock();

/* we skip the CMD queue below by starting at 1 */

@ -1230,12 +1196,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)

mvmsta = iwl_mvm_sta_from_mac80211(sta);

/* this isn't so nice, but works OK due to the way we loop */
spin_unlock(&mvm->queue_info_lock);

/* and we need this locking order */
spin_lock(&mvmsta->lock);
spin_lock(&mvm->queue_info_lock);
spin_lock_bh(&mvmsta->lock);
ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
inactive_tid_bitmap,
&unshare_queues,

@ -1243,11 +1204,10 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
if (ret >= 0 && free_queue < 0)
free_queue = ret;
/* only unlock sta lock - we still need the queue info lock */
spin_unlock(&mvmsta->lock);
spin_unlock_bh(&mvmsta->lock);
}

rcu_read_unlock();
spin_unlock_bh(&mvm->queue_info_lock);

/* Reconfigure queues requiring reconfiguation */
for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)

@ -1296,8 +1256,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
tfd_queue_mask = mvmsta->tfd_queue_msk;
spin_unlock_bh(&mvmsta->lock);

spin_lock_bh(&mvm->queue_info_lock);

/*
* Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
* exists

@ -1327,12 +1285,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);

/* try harder - perhaps kill an inactive queue */
queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);

spin_lock_bh(&mvm->queue_info_lock);
}

/* No free queue - we'll have to share */

@ -1353,8 +1307,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
if (queue > 0 && !shared_queue)
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

spin_unlock_bh(&mvm->queue_info_lock);

/* This shouldn't happen - out of queues */
if (WARN_ON(queue <= 0)) {
IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",

@ -1556,8 +1508,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
/* run the general cleanup/unsharing of queues */
iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

spin_lock_bh(&mvm->queue_info_lock);

/* Make sure we have free resources for this STA */
if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
!mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&

@ -1569,19 +1519,15 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
IWL_MVM_DQA_MIN_DATA_QUEUE,
IWL_MVM_DQA_MAX_DATA_QUEUE);
if (queue < 0) {
spin_unlock_bh(&mvm->queue_info_lock);
/* try again - this time kick out a queue if needed */
queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
if (queue < 0) {
IWL_ERR(mvm, "No available queues for new station\n");
return -ENOSPC;
}
spin_lock_bh(&mvm->queue_info_lock);
}
mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

spin_unlock_bh(&mvm->queue_info_lock);

mvmsta->reserved_queue = queue;

IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",

@ -1822,6 +1768,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
if (iwl_mvm_has_tlc_offload(mvm))
iwl_mvm_rs_add_sta(mvm, mvm_sta);

iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
if (ret)

@ -2004,18 +1952,14 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
* is still marked as IWL_MVM_QUEUE_RESERVED, and
* should be manually marked as free again
*/
spin_lock_bh(&mvm->queue_info_lock);
status = &mvm->queue_info[reserved_txq].status;
if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
(*status != IWL_MVM_QUEUE_FREE),
"sta_id %d reserved txq %d status %d",
sta_id, reserved_txq, *status)) {
spin_unlock_bh(&mvm->queue_info_lock);
sta_id, reserved_txq, *status))
return -EINVAL;
}

*status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
}

if (vif->type == NL80211_IFTYPE_STATION &&

@ -2873,8 +2817,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}

spin_lock(&mvm->queue_info_lock);

/*
* Note the possible cases:
* 1. An enabled TXQ - TXQ needs to become agg'ed

@ -2889,7 +2831,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (txq_id < 0) {
ret = txq_id;
IWL_ERR(mvm, "Failed to allocate agg queue\n");
goto release_locks;
goto out;
}

/* TXQ hasn't yet been enabled, so mark it only as reserved */

@ -2900,11 +2842,9 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_DEBUG_TX_QUEUES(mvm,
"Can't start tid %d agg on shared queue!\n",
tid);
goto release_locks;
goto out;
}

spin_unlock(&mvm->queue_info_lock);

IWL_DEBUG_TX_QUEUES(mvm,
"AGG for tid %d will be on queue #%d\n",
tid, txq_id);

@ -2935,10 +2875,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}

ret = 0;
goto out;

release_locks:
spin_unlock(&mvm->queue_info_lock);
out:
spin_unlock_bh(&mvmsta->lock);

@ -3007,9 +2944,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,

cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

spin_lock_bh(&mvm->queue_info_lock);
queue_status = mvm->queue_info[queue].status;
spin_unlock_bh(&mvm->queue_info_lock);

/* Maybe there is no need to even alloc a queue... */
if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)

@ -3055,9 +2990,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}

/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);

out:
/*

@ -3083,10 +3016,11 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
{
u16 txq_id = tid_data->txq_id;

lockdep_assert_held(&mvm->mutex);

if (iwl_mvm_has_new_tx_api(mvm))
return;

spin_lock_bh(&mvm->queue_info_lock);
/*
* The TXQ is marked as reserved only if no traffic came through yet
* This means no traffic has been sent on this TID (agg'd or not), so

@ -3098,8 +3032,6 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
}

spin_unlock_bh(&mvm->queue_info_lock);
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -397,6 +397,9 @@ struct iwl_mvm_rxq_dup_data {
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
* @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
* exchange).
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that

@@ -439,6 +442,7 @@ struct iwl_mvm_sta {
u8 agg_tids;
u8 sleep_tx_count;
u8 avg_energy;
u8 tx_ant;
};

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@@ -302,13 +302,30 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
offload_assist));
}

static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
if (info->band == NL80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;

if (sta && ieee80211_is_data(fc)) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
}

return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
}

static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta)
{
int rate_idx;
u8 rate_plcp;
u32 rate_flags;
u32 rate_flags = 0;

/* HT rate doesn't make sense for a non data frame */
WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
@@ -332,13 +349,6 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
/* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);

if (info->band == NL80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
rate_flags =
BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;

/* Set CCK flag as needed */
if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
rate_flags |= RATE_MCS_CCK_MSK;
@@ -346,6 +356,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
return (u32)rate_plcp | rate_flags;
}

static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
struct ieee80211_tx_info *info,
struct ieee80211_sta *sta, __le16 fc)
{
return iwl_mvm_get_tx_rate(mvm, info, sta) |
iwl_mvm_get_tx_ant(mvm, info, sta, fc);
}

/*
* Sets the fields in the Tx cmd that are rate related
*/
@@ -373,20 +391,21 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
*/

if (ieee80211_is_data(fc) && sta) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);

if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
return;
}
} else if (ieee80211_is_back_req(fc)) {
tx_cmd->tx_flags |=
cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
}

mvm->mgmt_last_antenna_idx =
iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);

/* Set the rate in the TX cmd */
tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
tx_cmd->rate_n_flags =
cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
}

static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
@ -491,6 +510,8 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
u16 offload_assist = 0;
|
||||
u32 rate_n_flags = 0;
|
||||
u16 flags = 0;
|
||||
struct iwl_mvm_sta *mvmsta = sta ?
|
||||
iwl_mvm_sta_from_mac80211(sta) : NULL;
|
||||
|
||||
if (ieee80211_is_data_qos(hdr->frame_control)) {
|
||||
u8 *qc = ieee80211_get_qos_ctl(hdr);
|
||||
|
@ -510,10 +531,16 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
if (!info->control.hw_key)
|
||||
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
|
||||
|
||||
/* For data packets rate info comes from the fw */
|
||||
if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
|
||||
/*
|
||||
* For data packets rate info comes from the fw. Only
|
||||
* set rate/antenna during connection establishment.
|
||||
*/
|
||||
if (sta && (!ieee80211_is_data(hdr->frame_control) ||
|
||||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) {
|
||||
flags |= IWL_TX_FLAGS_CMD_RATE;
|
||||
rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
|
||||
rate_n_flags =
|
||||
iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
|
||||
hdr->frame_control);
|
||||
}
|
||||
|
||||
if (mvm->trans->cfg->device_family >=
|
||||
|
@ -1160,11 +1187,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
|
|||
* If we have timed-out TIDs - schedule the worker that will
|
||||
* reconfig the queues and update them
|
||||
*
|
||||
* Note that the mvm->queue_info_lock isn't being taken here in
|
||||
* order to not serialize the TX flow. This isn't dangerous
|
||||
* because scheduling mvm->add_stream_wk can't ruin the state,
|
||||
* and if we DON'T schedule it due to some race condition then
|
||||
* next TX we get here we will.
|
||||
* Note that the no lock is taken here in order to not serialize
|
||||
* the TX flow. This isn't dangerous because scheduling
|
||||
* mvm->add_stream_wk can't ruin the state, and if we DON'T
|
||||
* schedule it due to some race condition then next TX we get
|
||||
* here we will.
|
||||
*/
|
||||
if (unlikely(mvm->queue_info[txq_id].status ==
|
||||
IWL_MVM_QUEUE_SHARED &&
|
||||
|
@ -1501,6 +1528,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
|
|||
break;
|
||||
}
|
||||
|
||||
if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
|
||||
ieee80211_is_mgmt(hdr->frame_control))
|
||||
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
|
||||
|
||||
/*
|
||||
* If we are freeing multiple frames, mark all the frames
|
||||
* but the first one as acked, since they were acknowledged
|
||||
|
@ -1600,6 +1631,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
|
|||
iwl_mvm_tx_airtime(mvm, mvmsta,
|
||||
le16_to_cpu(tx_resp->wireless_media_time));
|
||||
|
||||
if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
|
||||
mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
|
||||
iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
|
||||
|
||||
if (sta->wme && tid != IWL_MGMT_TID) {
|
||||
struct iwl_mvm_tid_data *tid_data =
|
||||
&mvmsta->tid_data[tid];
|
||||
|
|
|
@@ -285,6 +285,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}

#define FW_SYSASSERT_CPU_MASK 0xf0000000
static const struct {
const char *name;
u8 num;
@@ -301,6 +302,9 @@ static const struct {
{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
{ "NMI_INTERRUPT_HOST", 0x66 },
{ "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
{ "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
{ "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
@@ -312,7 +316,7 @@ static const char *desc_lookup(u32 num)
int i;

for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
if (advanced_lookup[i].num == num)
if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
return advanced_lookup[i].name;

/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
@@ -618,13 +622,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
return -EINVAL;

spin_lock_bh(&mvm->queue_info_lock);
if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
"Trying to reconfig unallocated queue %d\n", queue)) {
spin_unlock_bh(&mvm->queue_info_lock);
"Trying to reconfig unallocated queue %d\n", queue))
return -ENXIO;
}
spin_unlock_bh(&mvm->queue_info_lock);

IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
@@ -768,6 +768,29 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
return result;
}

void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm,
bool low_latency, u16 mac_id)
{
struct iwl_mac_low_latency_cmd cmd = {
.mac_id = cpu_to_le32(mac_id)
};

if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
return;

if (low_latency) {
/* currently we don't care about the direction */
cmd.low_latency_rx = 1;
cmd.low_latency_tx = 1;
}

if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd))
IWL_ERR(mvm, "Failed to send low latency command\n");
}

int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool low_latency,
enum iwl_mvm_low_latency_cause cause)
@@ -786,24 +809,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (low_latency == prev)
return 0;

if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
struct iwl_mac_low_latency_cmd cmd = {
.mac_id = cpu_to_le32(mvmvif->id)
};

if (low_latency) {
/* currently we don't care about the direction */
cmd.low_latency_rx = 1;
cmd.low_latency_tx = 1;
}
res = iwl_mvm_send_cmd_pdu(mvm,
iwl_cmd_id(LOW_LATENCY_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd);
if (res)
IWL_ERR(mvm, "Failed to send low latency command\n");
}
iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id);

res = iwl_mvm_update_quotas(mvm, false, NULL);
if (res)
@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
|||
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
|
||||
|
||||
/* 9000 Series */
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
|
||||
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
|
||||
|
@ -832,7 +882,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
|
|||
{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
|
||||
{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)},
|
||||
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
|
||||
{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
|
||||
{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
|
||||
|
|
|
@ -378,6 +378,23 @@ struct iwl_tso_hdr_page {
|
|||
u8 *pos;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
/**
|
||||
* enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
|
||||
* debugfs file
|
||||
*
|
||||
* @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
|
||||
* @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
|
||||
* @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
|
||||
* set the file can no longer be used.
|
||||
*/
|
||||
enum iwl_fw_mon_dbgfs_state {
|
||||
IWL_FW_MON_DBGFS_STATE_CLOSED,
|
||||
IWL_FW_MON_DBGFS_STATE_OPEN,
|
||||
IWL_FW_MON_DBGFS_STATE_DISABLED,
|
||||
};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* enum iwl_shared_irq_flags - level of sharing for irq
|
||||
* @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
|
||||
|
@ -414,6 +431,26 @@ struct iwl_self_init_dram {
|
|||
int paging_cnt;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct cont_rec: continuous recording data structure
|
||||
* @prev_wr_ptr: the last address that was read in monitor_data
|
||||
* debugfs file
|
||||
* @prev_wrap_cnt: the wrap count that was used during the last read in
|
||||
* monitor_data debugfs file
|
||||
* @state: the state of monitor_data debugfs file as described
|
||||
* in &iwl_fw_mon_dbgfs_state enum
|
||||
* @mutex: locked while reading from monitor_data debugfs file
|
||||
*/
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
struct cont_rec {
|
||||
u32 prev_wr_ptr;
|
||||
u32 prev_wrap_cnt;
|
||||
u8 state;
|
||||
/* Used to sync monitor_data debugfs file with driver unload flow */
|
||||
struct mutex mutex;
|
||||
};
|
||||
#endif
|
||||
|
||||
/**
|
||||
* struct iwl_trans_pcie - PCIe transport specific data
|
||||
* @rxq: all the RX queue data
|
||||
|
@ -451,6 +488,9 @@ struct iwl_self_init_dram {
|
|||
* @reg_lock: protect hw register access
|
||||
* @mutex: to protect stop_device / start_fw / start_hw
|
||||
* @cmd_in_flight: true when we have a host command in flight
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
* @fw_mon_data: fw continuous recording data
|
||||
#endif
|
||||
* @msix_entries: array of MSI-X entries
|
||||
* @msix_enabled: true if managed to enable MSI-X
|
||||
* @shared_vec_mask: the type of causes the shared vector handles
|
||||
|
@ -538,6 +578,10 @@ struct iwl_trans_pcie {
|
|||
bool cmd_hold_nic_awake;
|
||||
bool ref_cmd_in_flight;
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
struct cont_rec fw_mon_data;
|
||||
#endif
|
||||
|
||||
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
|
||||
bool msix_enabled;
|
||||
u8 shared_vec_mask;
|
||||
|
|
|
@ -71,6 +71,7 @@
|
|||
#include <linux/vmalloc.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/wait.h>
|
||||
|
||||
#include "iwl-drv.h"
|
||||
#include "iwl-trans.h"
|
||||
|
@ -1729,6 +1730,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
|
|||
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
u32 hpm;
|
||||
int err;
|
||||
|
||||
lockdep_assert_held(&trans_pcie->mutex);
|
||||
|
@ -1739,6 +1741,17 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
|
|||
return err;
|
||||
}
|
||||
|
||||
hpm = iwl_trans_read_prph(trans, HPM_DEBUG);
|
||||
if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
|
||||
if (iwl_trans_read_prph(trans, PREG_PRPH_WPROT_0) &
|
||||
PREG_WFPM_ACCESS) {
|
||||
IWL_ERR(trans,
|
||||
"Error, can not clear persistence bit\n");
|
||||
return -EPERM;
|
||||
}
|
||||
iwl_trans_write_prph(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT);
|
||||
}
|
||||
|
||||
iwl_trans_pcie_sw_reset(trans);
|
||||
|
||||
err = iwl_pcie_apm_init(trans);
|
||||
|
@ -2697,6 +2710,137 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
|
|||
return count;
|
||||
}
|
||||
|
||||
static int iwl_dbgfs_monitor_data_open(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
struct iwl_trans *trans = inode->i_private;
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
|
||||
if (!trans->dbg_dest_tlv ||
|
||||
trans->dbg_dest_tlv->monitor_mode != EXTERNAL_MODE) {
|
||||
IWL_ERR(trans, "Debug destination is not set to DRAM\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
|
||||
return -EBUSY;
|
||||
|
||||
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
|
||||
return simple_open(inode, file);
|
||||
}
|
||||
|
||||
static int iwl_dbgfs_monitor_data_release(struct inode *inode,
|
||||
struct file *file)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie =
|
||||
IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
|
||||
|
||||
if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
|
||||
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
|
||||
void *buf, ssize_t *size,
|
||||
ssize_t *bytes_copied)
|
||||
{
|
||||
int buf_size_left = count - *bytes_copied;
|
||||
|
||||
buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
|
||||
if (*size > buf_size_left)
|
||||
*size = buf_size_left;
|
||||
|
||||
*size -= copy_to_user(user_buf, buf, *size);
|
||||
*bytes_copied += *size;
|
||||
|
||||
if (buf_size_left == *size)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_trans *trans = file->private_data;
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
void *cpu_addr = (void *)trans->fw_mon[0].block, *curr_buf;
|
||||
struct cont_rec *data = &trans_pcie->fw_mon_data;
|
||||
u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
|
||||
ssize_t size, bytes_copied = 0;
|
||||
bool b_full;
|
||||
|
||||
if (trans->dbg_dest_tlv) {
|
||||
write_ptr_addr =
|
||||
le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
|
||||
wrap_cnt_addr = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
|
||||
} else {
|
||||
write_ptr_addr = MON_BUFF_WRPTR;
|
||||
wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
|
||||
}
|
||||
|
||||
if (unlikely(!trans->dbg_rec_on))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&data->mutex);
|
||||
if (data->state ==
|
||||
IWL_FW_MON_DBGFS_STATE_DISABLED) {
|
||||
mutex_unlock(&data->mutex);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* write_ptr position in bytes rather then DW */
|
||||
write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
|
||||
wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
|
||||
|
||||
if (data->prev_wrap_cnt == wrap_cnt) {
|
||||
size = write_ptr - data->prev_wr_ptr;
|
||||
curr_buf = cpu_addr + data->prev_wr_ptr;
|
||||
b_full = iwl_write_to_user_buf(user_buf, count,
|
||||
curr_buf, &size,
|
||||
&bytes_copied);
|
||||
data->prev_wr_ptr += size;
|
||||
|
||||
} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
|
||||
write_ptr < data->prev_wr_ptr) {
|
||||
size = trans->fw_mon[0].size - data->prev_wr_ptr;
|
||||
curr_buf = cpu_addr + data->prev_wr_ptr;
|
||||
b_full = iwl_write_to_user_buf(user_buf, count,
|
||||
curr_buf, &size,
|
||||
&bytes_copied);
|
||||
data->prev_wr_ptr += size;
|
||||
|
||||
if (!b_full) {
|
||||
size = write_ptr;
|
||||
b_full = iwl_write_to_user_buf(user_buf, count,
|
||||
cpu_addr, &size,
|
||||
&bytes_copied);
|
||||
data->prev_wr_ptr = size;
|
||||
data->prev_wrap_cnt++;
|
||||
}
|
||||
} else {
|
||||
if (data->prev_wrap_cnt == wrap_cnt - 1 &&
|
||||
write_ptr > data->prev_wr_ptr)
|
||||
IWL_WARN(trans,
|
||||
"write pointer passed previous write pointer, start copying from the beginning\n");
|
||||
else if (!unlikely(data->prev_wrap_cnt == 0 &&
|
||||
data->prev_wr_ptr == 0))
|
||||
IWL_WARN(trans,
|
||||
"monitor data is out of sync, start copying from the beginning\n");
|
||||
|
||||
size = write_ptr;
|
||||
b_full = iwl_write_to_user_buf(user_buf, count,
|
||||
cpu_addr, &size,
|
||||
&bytes_copied);
|
||||
data->prev_wr_ptr = size;
|
||||
data->prev_wrap_cnt = wrap_cnt;
|
||||
}
|
||||
|
||||
mutex_unlock(&data->mutex);
|
||||
|
||||
return bytes_copied;
|
||||
}
|
||||
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
|
||||
DEBUGFS_READ_FILE_OPS(fh_reg);
|
||||
DEBUGFS_READ_FILE_OPS(rx_queue);
|
||||
|
@ -2704,6 +2848,12 @@ DEBUGFS_READ_FILE_OPS(tx_queue);
|
|||
DEBUGFS_WRITE_FILE_OPS(csr);
|
||||
DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
|
||||
|
||||
static const struct file_operations iwl_dbgfs_monitor_data_ops = {
|
||||
.read = iwl_dbgfs_monitor_data_read,
|
||||
.open = iwl_dbgfs_monitor_data_open,
|
||||
.release = iwl_dbgfs_monitor_data_release,
|
||||
};
|
||||
|
||||
/* Create the debugfs files and directories */
|
||||
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
|
||||
{
|
||||
|
@ -2715,12 +2865,23 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
|
|||
DEBUGFS_ADD_FILE(csr, dir, 0200);
|
||||
DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
|
||||
DEBUGFS_ADD_FILE(rfkill, dir, 0600);
|
||||
DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
IWL_ERR(trans, "failed to create the trans debugfs entry\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct cont_rec *data = &trans_pcie->fw_mon_data;
|
||||
|
||||
mutex_lock(&data->mutex);
|
||||
data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
|
||||
mutex_unlock(&data->mutex);
|
||||
}
|
||||
#endif /*CONFIG_IWLWIFI_DEBUGFS */
|
||||
|
||||
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
|
||||
|
@ -2978,7 +3139,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len)
|
|||
|
||||
static struct iwl_trans_dump_data
|
||||
*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
|
||||
const struct iwl_fw_dbg_trigger_tlv *trigger)
|
||||
u32 dump_mask)
|
||||
{
|
||||
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
|
||||
struct iwl_fw_error_dump_data *data;
|
||||
|
@ -2990,7 +3151,10 @@ static struct iwl_trans_dump_data
|
|||
int i, ptr;
|
||||
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
|
||||
!trans->cfg->mq_rx_supported &&
|
||||
trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
|
||||
dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
|
||||
|
||||
if (!dump_mask)
|
||||
return NULL;
|
||||
|
||||
/* transport dump header */
|
||||
len = sizeof(*dump_data);
|
||||
|
@ -3002,11 +3166,7 @@ static struct iwl_trans_dump_data
|
|||
/* FW monitor */
|
||||
monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
|
||||
|
||||
if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
|
||||
if (!(trans->dbg_dump_mask &
|
||||
BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
|
||||
return NULL;
|
||||
|
||||
if (dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) {
|
||||
dump_data = vzalloc(len);
|
||||
if (!dump_data)
|
||||
return NULL;
|
||||
|
@ -3019,11 +3179,11 @@ static struct iwl_trans_dump_data
|
|||
}
|
||||
|
||||
/* CSR registers */
|
||||
if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
|
||||
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
|
||||
len += sizeof(*data) + IWL_CSR_TO_DUMP;
|
||||
|
||||
/* FH registers */
|
||||
if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
|
||||
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
|
||||
if (trans->cfg->gen2)
|
||||
len += sizeof(*data) +
|
||||
(FH_MEM_UPPER_BOUND_GEN2 -
|
||||
|
@ -3048,8 +3208,7 @@ static struct iwl_trans_dump_data
|
|||
}
|
||||
|
||||
/* Paged memory for gen2 HW */
|
||||
if (trans->cfg->gen2 &&
|
||||
trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
|
||||
if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
|
||||
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
|
||||
len += sizeof(*data) +
|
||||
sizeof(struct iwl_fw_error_dump_paging) +
|
||||
|
@ -3062,7 +3221,7 @@ static struct iwl_trans_dump_data
|
|||
len = 0;
|
||||
data = (void *)dump_data->data;
|
||||
|
||||
if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
|
||||
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
|
||||
u16 tfd_size = trans_pcie->tfd_size;
|
||||
|
||||
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
|
||||
|
@ -3096,16 +3255,15 @@ static struct iwl_trans_dump_data
|
|||
data = iwl_fw_error_next_data(data);
|
||||
}
|
||||
|
||||
if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
|
||||
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
|
||||
len += iwl_trans_pcie_dump_csr(trans, &data);
|
||||
if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
|
||||
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
|
||||
len += iwl_trans_pcie_fh_regs_dump(trans, &data);
|
||||
if (dump_rbs)
|
||||
len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
|
||||
|
||||
/* Paged memory for gen2 HW */
|
||||
if (trans->cfg->gen2 &&
|
||||
trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
|
||||
if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
|
||||
for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
|
||||
struct iwl_fw_error_dump_paging *paging;
|
||||
dma_addr_t addr =
|
||||
|
@ -3125,7 +3283,7 @@ static struct iwl_trans_dump_data
|
|||
len += sizeof(*data) + sizeof(*paging) + page_len;
|
||||
}
|
||||
}
|
||||
if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
|
||||
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
|
||||
len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
|
||||
|
||||
dump_data->len = len;
|
||||
|
@ -3202,6 +3360,9 @@ static const struct iwl_trans_ops trans_ops_pcie = {
|
|||
|
||||
.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
|
||||
.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
|
||||
|
@ -3221,6 +3382,9 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
|
|||
.txq_free = iwl_trans_pcie_dyn_txq_free,
|
||||
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
|
||||
.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
|
||||
#endif
|
||||
};
|
||||
|
||||
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
||||
|
@ -3392,8 +3556,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
|||
#if IS_ENABLED(CONFIG_IWLMVM)
|
||||
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
|
||||
|
||||
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
|
||||
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
|
||||
if (cfg == &iwl22000_2ax_cfg_hr) {
|
||||
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
|
||||
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
|
||||
trans->cfg = &iwl22000_2ax_cfg_hr;
|
||||
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
|
||||
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) {
|
||||
trans->cfg = &iwl22000_2ax_cfg_jf;
|
||||
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
|
||||
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) {
|
||||
IWL_ERR(trans, "RF ID HRCDB is not supported\n");
|
||||
ret = -EINVAL;
|
||||
goto out_no_pci;
|
||||
} else {
|
||||
IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n",
|
||||
CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id));
|
||||
ret = -EINVAL;
|
||||
goto out_no_pci;
|
||||
}
|
||||
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
|
||||
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
|
||||
u32 hw_status;
|
||||
|
||||
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
|
||||
|
@ -3454,6 +3636,11 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
|
|||
trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
|
||||
#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUGFS
|
||||
trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
|
||||
mutex_init(&trans_pcie->fw_mon_data.mutex);
|
||||
#endif
|
||||
|
||||
return trans;
|
||||
|
||||
out_free_ict:
|
||||
|
|
|
@@ -1228,8 +1228,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
txq->write_ptr = wr_ptr;
iwl_write_direct32(trans, HBUS_TARG_WRPTR,
(txq->write_ptr) | (qid << 16));

IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

iwl_free_resp(hcmd);
@@ -908,6 +908,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
case EZUSB_CTX_REQ_SUBMITTED:
if (!ctx->in_rid)
break;
/* fall through */
default:
err("%s: Unexpected context state %d", __func__,
state);
@@ -235,6 +235,7 @@ isl38xx_in_queue(isl38xx_control_block *cb, int queue)
/* send queues */
case ISL38XX_CB_TX_MGMTQ:
BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
/* fall through */

case ISL38XX_CB_TX_DATA_LQ:
case ISL38XX_CB_TX_DATA_HQ:
@@ -1691,6 +1691,7 @@ static int prism54_get_encodeext(struct net_device *ndev,
case DOT11_AUTH_BOTH:
case DOT11_AUTH_SK:
wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
/* fall through */
case DOT11_AUTH_OS:
default:
wrqu->encoding.flags |= IW_ENCODE_OPEN;
@@ -932,6 +932,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state)
switch (new_state) {
case PRV_STATE_OFF:
priv->state_off++;
/* fall through */
default:
priv->state = new_state;
break;
@@ -796,15 +796,13 @@ static void if_spi_h2c(struct if_spi_card *card,
{
struct lbs_private *priv = card->priv;
int err = 0;
u16 int_type, port_reg;
u16 port_reg;

switch (type) {
case MVMS_DAT:
int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
port_reg = IF_SPI_DATA_RDWRPORT_REG;
break;
case MVMS_CMD:
int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
port_reg = IF_SPI_CMD_RDWRPORT_REG;
break;
default:
@@ -363,6 +363,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
(const u8 *)hdr,
hdr->len + sizeof(struct ieee_types_header)))
break;
/* fall through */
default:
memcpy(gen_ie->ie_buffer + ie_len, hdr,
hdr->len + sizeof(struct ieee_types_header));
@@ -1,11 +1,11 @@
config QTNFMAC
tristate
depends on QTNFMAC_PEARL_PCIE
default m if QTNFMAC_PEARL_PCIE=m
default y if QTNFMAC_PEARL_PCIE=y
depends on QTNFMAC_PCIE
default m if QTNFMAC_PCIE=m
default y if QTNFMAC_PCIE=y

config QTNFMAC_PEARL_PCIE
tristate "Quantenna QSR10g PCIe support"
config QTNFMAC_PCIE
tristate "Quantenna QSR1000/QSR2000/QSR10g PCIe support"
default n
depends on PCI && CFG80211
select QTNFMAC

@@ -13,7 +13,8 @@ config QTNFMAC_PEARL_PCIE
select CRC32
help
This option adds support for wireless adapters based on Quantenna
802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe.
802.11ac QSR10g (aka Pearl) and QSR1000/QSR2000 (aka Topaz)
FullMAC chipsets running over PCIe.

If you choose to build it as a module, two modules will be built:
qtnfmac.ko and qtnfmac_pearl_pcie.ko.
qtnfmac.ko and qtnfmac_pcie.ko.
Some files were not shown because too many files have changed in this diff.