wireless-drivers fixes for v5.5

Merge tag 'wireless-drivers-2020-01-23' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers

Kalle Valo says:

====================
wireless-drivers fixes for v5.5

Second set of fixes for v5.5. There are quite a few patches, especially
on iwlwifi, due to me being on a long break. Libertas also has a
security fix and mt76 a build fix.

iwlwifi

* don't send the PPAG command when PPAG is disabled, since it can cause
  problems

* a few fixes for a HW bug

* a fix for RS offload

* a fix for 3168 devices where the wrong NVM tables were being read

* fix a couple of potential memory leaks in TXQ code

* disable L0S states in all hardware since our hardware doesn't
  officially support them anymore (and older versions of the hardware
  had instability in these states)

* remove the lar_disable module parameter since it has been causing
  issues for some people who erroneously disable it

* force the debug monitor HW to stop also when debug is disabled, since
  it sometimes stays on and prevents low system power states

* don't send the IWL_MVM_RXQ_NSSN_SYNC notification due to DMA problems

libertas

* fix two buffer overflows

mt76

* build fix related to CONFIG_MT76_LEDS

* fix an off-by-one in bitrates handling
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
@@ -8570,7 +8570,7 @@ S: Maintained
 F: drivers/platform/x86/intel-vbtn.c
 
 INTEL WIRELESS 3945ABG/BG, 4965AGN (iwlegacy)
-M: Stanislaw Gruszka <sgruszka@redhat.com>
+M: Stanislaw Gruszka <stf_xl@wp.pl>
 L: linux-wireless@vger.kernel.org
 S: Supported
 F: drivers/net/wireless/intel/iwlegacy/

@@ -13822,7 +13822,7 @@ S: Maintained
 F: arch/mips/ralink
 
 RALINK RT2X00 WIRELESS LAN DRIVER
-M: Stanislaw Gruszka <sgruszka@redhat.com>
+M: Stanislaw Gruszka <stf_xl@wp.pl>
 M: Helmut Schaa <helmut.schaa@googlemail.com>
 L: linux-wireless@vger.kernel.org
 S: Maintained
@@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct iwl_station_priv *sta_priv = NULL;
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 	__le16 fc;
 	u8 hdr_len;

@@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
 	if (unlikely(!dev_cmd))
 		goto drop_unlock_priv;
 
-	memset(dev_cmd, 0, sizeof(*dev_cmd));
 	dev_cmd->hdr.cmd = REPLY_TX;
 	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
 
@@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
 {
 	union acpi_object *wifi_pkg, *data;
 	bool enabled;
-	int i, n_profiles, tbl_rev;
-	int ret = 0;
+	int i, n_profiles, tbl_rev, pos;
+	int ret = 0;
 
 	data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD);
 	if (IS_ERR(data))

@@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt)
 		goto out_free;
 	}
 
-	for (i = 0; i < n_profiles; i++) {
-		/* the tables start at element 3 */
-		int pos = 3;
+	/* the tables start at element 3 */
+	pos = 3;
 
+	for (i = 0; i < n_profiles; i++) {
 		/* The EWRD profiles officially go from 2 to 4, but we
 		 * save them in sar_profiles[1-3] (because we don't
 		 * have profile 0). So in the array we start from 1.
@@ -2669,12 +2669,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
 {
 	int ret = 0;
 
-	/* if the FW crashed or not debug monitor cfg was given, there is
-	 * no point in changing the recording state
-	 */
-	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) ||
-	    (!fwrt->trans->dbg.dest_tlv &&
-	     fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID))
+	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
 		return 0;
 
 	if (fw_has_capa(&fwrt->fw->ucode_capa,
@@ -379,7 +379,7 @@ enum {
 
 
 /* CSR GIO */
-#define CSR_GIO_REG_VAL_L0S_ENABLED	(0x00000002)
+#define CSR_GIO_REG_VAL_L0S_DISABLED	(0x00000002)
 
 /*
  * UCODE-DRIVER GP (general purpose) mailbox register 1
@@ -480,7 +480,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
 	if (!frag || frag->size || !pages)
 		return -EIO;
 
-	while (pages) {
+	/*
+	 * We try to allocate as many pages as we can, starting with
+	 * the requested amount and going down until we can allocate
+	 * something. Because of DIV_ROUND_UP(), pages will never go
+	 * down to 0 and stop the loop, so stop when pages reaches 1,
+	 * which is too small anyway.
+	 */
+	while (pages > 1) {
 		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
 					   &physical,
 					   GFP_KERNEL | __GFP_NOWARN);
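The new comment is the whole story: DIV_ROUND_UP(1, 2) is still 1, so a plain while (pages) would spin forever once every allocation fails. A minimal userspace sketch of the same halving-retry pattern (try_alloc() and the sizes are illustrative, not driver code):

#include <stdio.h>
#include <stdlib.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void *try_alloc(size_t pages)
{
	/* stand-in for dma_alloc_coherent(); pretend big requests fail */
	return pages > 4 ? NULL : malloc(pages * 4096);
}

int main(void)
{
	size_t pages = DIV_ROUND_UP(100000, 4096);	/* 25 pages */
	void *block = NULL;

	/* pages never reaches 0 via DIV_ROUND_UP, so stop at 1 */
	while (pages > 1) {
		block = try_alloc(pages);
		if (block)
			break;
		pages = DIV_ROUND_UP(pages, 2);	/* 25 -> 13 -> 7 -> 4 */
	}
	printf("got %zu pages at %p\n", block ? pages : 0, block);
	free(block);
	return 0;
}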
@@ -1817,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling,
 module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
 MODULE_PARM_DESC(nvm_file, "NVM file name");
 
-module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
-MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
-
 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
 MODULE_PARM_DESC(uapsd_disable,
 		 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
@@ -115,7 +115,6 @@ enum iwl_uapsd_disable {
 * @nvm_file: specifies a external NVM file
 * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
 *	IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
-* @lar_disable: disable LAR (regulatory), default = 0
 * @fw_monitor: allow to use firmware monitor
 * @disable_11ac: disable VHT capabilities, default = false.
 * @remove_when_gone: remove an inaccessible device from the PCIe bus.

@@ -136,7 +135,6 @@ struct iwl_mod_params {
 	int antenna_coupling;
 	char *nvm_file;
 	u32 uapsd_disable;
-	bool lar_disable;
 	bool fw_monitor;
 	bool disable_11ac;
 	/**
@@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags {
 	NVM_CHANNEL_DC_HIGH = BIT(12),
 };
 
+/**
+ * enum iwl_reg_capa_flags - global flags applied for the whole regulatory
+ * domain.
+ * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *	2.4Ghz band is allowed.
+ * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ *	5Ghz band is allowed.
+ * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed
+ *	for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed
+ *	for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden
+ *	for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed.
+ */
+enum iwl_reg_capa_flags {
+	REG_CAPA_BF_CCD_LOW_BAND	= BIT(0),
+	REG_CAPA_BF_CCD_HIGH_BAND	= BIT(1),
+	REG_CAPA_160MHZ_ALLOWED		= BIT(2),
+	REG_CAPA_80MHZ_ALLOWED		= BIT(3),
+	REG_CAPA_MCS_8_ALLOWED		= BIT(4),
+	REG_CAPA_MCS_9_ALLOWED		= BIT(5),
+	REG_CAPA_40MHZ_FORBIDDEN	= BIT(7),
+	REG_CAPA_DC_HIGH_ENABLED	= BIT(9),
+};
+
 static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
 					       int chan, u32 flags)
 {

@@ -939,10 +967,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+		   const struct iwl_fw *fw,
 		   const __be16 *nvm_hw, const __le16 *nvm_sw,
 		   const __le16 *nvm_calib, const __le16 *regulatory,
 		   const __le16 *mac_override, const __le16 *phy_sku,
-		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+		   u8 tx_chains, u8 rx_chains)
 {
 	struct iwl_nvm_data *data;
 	bool lar_enabled;

@@ -1022,7 +1051,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 		return NULL;
 	}
 
-	if (lar_fw_supported && lar_enabled)
+	if (lar_enabled &&
+	    fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
 		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
 
 	if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw))

@@ -1038,6 +1068,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
 
 static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
 				       int ch_idx, u16 nvm_flags,
+				       u16 cap_flags,
 				       const struct iwl_cfg *cfg)
 {
 	u32 flags = NL80211_RRF_NO_HT40;

@@ -1076,13 +1107,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
 	    (flags & NL80211_RRF_NO_IR))
 		flags |= NL80211_RRF_GO_CONCURRENT;
 
+	/*
+	 * cap_flags is per regulatory domain so apply it for every channel
+	 */
+	if (ch_idx >= NUM_2GHZ_CHANNELS) {
+		if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+			flags |= NL80211_RRF_NO_HT40;
+
+		if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+			flags |= NL80211_RRF_NO_80MHZ;
+
+		if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+			flags |= NL80211_RRF_NO_160MHZ;
+	}
+
 	return flags;
 }
 
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		       int num_of_ch, __le32 *channels, u16 fw_mcc,
-		       u16 geo_info)
+		       u16 geo_info, u16 cap)
 {
 	int ch_idx;
 	u16 ch_flags;

@@ -1140,7 +1185,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		}
 
 		reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
-							     ch_flags, cfg);
+							     ch_flags, cap,
+							     cfg);
 
 		/* we can't continue the same rule */
 		if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||

@@ -1405,9 +1451,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
 		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
 	};
 	int ret;
-	bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
-				fw_has_capa(&fw->ucode_capa,
-					    IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 	bool empty_otp;
 	u32 mac_flags;
 	u32 sbands_flags = 0;

@@ -1485,7 +1528,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
 	nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
 	nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
 
-	if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+	if (le32_to_cpu(rsp->regulatory.lar_enabled) &&
+	    fw_has_capa(&fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) {
 		nvm->lar_enabled = true;
 		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
 	}
@@ -7,7 +7,7 @@
 *
 * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
-* Copyright(c) 2018        Intel Corporation
+* Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as

@@ -29,7 +29,7 @@
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
-* Copyright(c) 2018        Intel Corporation
+* Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without

@@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags {
 */
 struct iwl_nvm_data *
 iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+		   const struct iwl_fw *fw,
 		   const __be16 *nvm_hw, const __le16 *nvm_sw,
 		   const __le16 *nvm_calib, const __le16 *regulatory,
 		   const __le16 *mac_override, const __le16 *phy_sku,
-		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+		   u8 tx_chains, u8 rx_chains);
 
 /**
 * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW

@@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 struct ieee80211_regdomain *
 iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
 		       int num_of_ch, __le32 *channels, u16 fw_mcc,
-		       u16 geo_info);
+		       u16 geo_info, u16 cap);
 
 /**
 * struct iwl_nvm_section - describes an NVM section in memory.
@@ -66,7 +66,9 @@
 
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 				  struct device *dev,
-				  const struct iwl_trans_ops *ops)
+				  const struct iwl_trans_ops *ops,
+				  unsigned int cmd_pool_size,
+				  unsigned int cmd_pool_align)
 {
 	struct iwl_trans *trans;
 #ifdef CONFIG_LOCKDEP

@@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 		 "iwl_cmd_pool:%s", dev_name(trans->dev));
 	trans->dev_cmd_pool =
 		kmem_cache_create(trans->dev_cmd_pool_name,
-				  sizeof(struct iwl_device_cmd),
-				  sizeof(void *),
-				  SLAB_HWCACHE_ALIGN,
-				  NULL);
+				  cmd_pool_size, cmd_pool_align,
+				  SLAB_HWCACHE_ALIGN, NULL);
 	if (!trans->dev_cmd_pool)
 		return NULL;
 
@@ -193,6 +193,18 @@ struct iwl_device_cmd {
 	};
 } __packed;
 
+/**
+ * struct iwl_device_tx_cmd - buffer for TX command
+ * @hdr: the header
+ * @payload: the payload placeholder
+ *
+ * The actual structure is sized dynamically according to need.
+ */
+struct iwl_device_tx_cmd {
+	struct iwl_cmd_header hdr;
+	u8 payload[];
+} __packed;
+
 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
 
 /*

@@ -544,7 +556,7 @@ struct iwl_trans_ops {
 	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
 	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
-		  struct iwl_device_cmd *dev_cmd, int queue);
+		  struct iwl_device_tx_cmd *dev_cmd, int queue);
 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
 			struct sk_buff_head *skbs);
 

@@ -948,22 +960,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
 	return trans->ops->dump_data(trans, dump_mask);
 }
 
-static inline struct iwl_device_cmd *
+static inline struct iwl_device_tx_cmd *
 iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 {
-	return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
 }
 
 int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
 static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
-					 struct iwl_device_cmd *dev_cmd)
+					 struct iwl_device_tx_cmd *dev_cmd)
 {
 	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
 }
 
 static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
-			       struct iwl_device_cmd *dev_cmd, int queue)
+			       struct iwl_device_tx_cmd *dev_cmd, int queue)
 {
 	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
 		return -EIO;

@@ -1271,7 +1283,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
 *****************************************************/
 struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
 				  struct device *dev,
-				  const struct iwl_trans_ops *ops);
+				  const struct iwl_trans_ops *ops,
+				  unsigned int cmd_pool_size,
+				  unsigned int cmd_pool_align);
 void iwl_trans_free(struct iwl_trans *trans);
 
 /*****************************************************
@@ -154,5 +154,6 @@
 #define IWL_MVM_D3_DEBUG			false
 #define IWL_MVM_USE_TWT				false
 #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA	10
+#define IWL_MVM_USE_NSSN_SYNC			0
 
 #endif /* __MVM_CONSTANTS_H */
@@ -841,9 +841,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
 		return 0;
 	}
 
+	if (!mvm->fwrt.ppag_table.enabled) {
+		IWL_DEBUG_RADIO(mvm,
+				"PPAG not enabled, command not sent.\n");
+		return 0;
+	}
+
 	IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
-	IWL_DEBUG_RADIO(mvm, "PPAG is %s\n",
-			mvm->fwrt.ppag_table.enabled ? "enabled" : "disabled");
 
 	for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
 		for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
@@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
 				      __le32_to_cpu(resp->n_channels),
 				      resp->channels,
 				      __le16_to_cpu(resp->mcc),
-				      __le16_to_cpu(resp->geo_info));
+				      __le16_to_cpu(resp->geo_info),
+				      __le16_to_cpu(resp->cap));
 	/* Store the return source id */
 	src_id = resp->source_id;
 	kfree(resp);

@@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	return ret;
 }
 
+static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+			   struct ieee80211_sta *sta)
+{
+	if (likely(sta)) {
+		if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0))
+			return;
+	} else {
+		if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0))
+			return;
+	}
+
+	ieee80211_free_txskb(mvm->hw, skb);
+}
+
 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 			   struct ieee80211_tx_control *control,
 			   struct sk_buff *skb)

@@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
 		}
 	}
 
-	if (sta) {
-		if (iwl_mvm_tx_skb(mvm, skb, sta))
-			goto drop;
-		return;
-	}
-
-	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
-		goto drop;
+	iwl_mvm_tx_skb(mvm, skb, sta);
 	return;
 drop:
 	ieee80211_free_txskb(hw, skb);

@@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 				break;
 			}
 
-			if (!txq->sta)
-				iwl_mvm_tx_skb_non_sta(mvm, skb);
-			else
-				iwl_mvm_tx_skb(mvm, skb, txq->sta);
+			iwl_mvm_tx_skb(mvm, skb, txq->sta);
 		}
 	} while (atomic_dec_return(&mvmtxq->tx_request));
 	rcu_read_unlock();

@@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
 	return ret;
 }
 
+static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
+{
+	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		rinfo->bw = RATE_INFO_BW_20;
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		rinfo->bw = RATE_INFO_BW_40;
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		rinfo->bw = RATE_INFO_BW_80;
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		rinfo->bw = RATE_INFO_BW_160;
+		break;
+	}
+
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		rinfo->flags |= RATE_INFO_FLAGS_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_HT_MCS_NSS_MSK) + 1;
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+		rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_RATE_CODE_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_NSS_MSK) + 1;
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+	} else if (rate_n_flags & RATE_MCS_HE_MSK) {
+		u32 gi_ltf = u32_get_bits(rate_n_flags,
+					  RATE_MCS_HE_GI_LTF_MSK);
+
+		rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
+		rinfo->mcs = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_RATE_CODE_MSK);
+		rinfo->nss = u32_get_bits(rate_n_flags,
+					  RATE_VHT_MCS_NSS_MSK) + 1;
+
+		if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
+			rinfo->bw = RATE_INFO_BW_HE_RU;
+			rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+		}
+
+		switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) {
+		case RATE_MCS_HE_TYPE_SU:
+		case RATE_MCS_HE_TYPE_EXT_SU:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else if (gi_ltf == 2)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else if (rate_n_flags & RATE_MCS_SGI_MSK)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		case RATE_MCS_HE_TYPE_MU:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else if (gi_ltf == 2)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		case RATE_MCS_HE_TYPE_TRIG:
+			if (gi_ltf == 0 || gi_ltf == 1)
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			else
+				rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		}
+
+		if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
+			rinfo->he_dcm = 1;
+	} else {
+		switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
+		case IWL_RATE_1M_PLCP:
+			rinfo->legacy = 10;
+			break;
+		case IWL_RATE_2M_PLCP:
+			rinfo->legacy = 20;
+			break;
+		case IWL_RATE_5M_PLCP:
+			rinfo->legacy = 55;
+			break;
+		case IWL_RATE_11M_PLCP:
+			rinfo->legacy = 110;
+			break;
+		case IWL_RATE_6M_PLCP:
+			rinfo->legacy = 60;
+			break;
+		case IWL_RATE_9M_PLCP:
+			rinfo->legacy = 90;
+			break;
+		case IWL_RATE_12M_PLCP:
+			rinfo->legacy = 120;
+			break;
+		case IWL_RATE_18M_PLCP:
+			rinfo->legacy = 180;
+			break;
+		case IWL_RATE_24M_PLCP:
+			rinfo->legacy = 240;
+			break;
+		case IWL_RATE_36M_PLCP:
+			rinfo->legacy = 360;
+			break;
+		case IWL_RATE_48M_PLCP:
+			rinfo->legacy = 480;
+			break;
+		case IWL_RATE_54M_PLCP:
+			rinfo->legacy = 540;
+			break;
+		}
+	}
+}
+
 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_sta *sta,

@@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
 		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
 	}
 
+	if (iwl_mvm_has_tlc_offload(mvm)) {
+		struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+		iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate);
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+	}
+
 	/* if beacon filtering isn't on mac80211 does it anyway */
 	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
 		return;
@@ -1298,9 +1298,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 	bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
 				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
 
-	if (iwlwifi_mod_params.lar_disable)
-		return false;
-
 	/*
 	 * Enable LAR only if it is supported by the FW (TLV) &&
 	 * enabled in the NVM

@@ -1508,8 +1505,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
 int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
 					     u16 len, const void *data,
 					     u32 *status);
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-		   struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+		       struct ieee80211_sta *sta);
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 			struct iwl_tx_cmd *tx_cmd,
@@ -277,11 +277,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 	struct iwl_nvm_section *sections = mvm->nvm_sections;
 	const __be16 *hw;
 	const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
-	bool lar_enabled;
 	int regulatory_type;
 
 	/* Checking for required sections */
-	if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+	if (mvm->trans->cfg->nvm_type == IWL_NVM) {
 		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
 		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
 			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");

@@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
 		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
 		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
 
-	lar_enabled = !iwlwifi_mod_params.lar_disable &&
-		      fw_has_capa(&mvm->fw->ucode_capa,
-				  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
-
-	return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+	return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
 				  regulatory, mac_override, phy_sku,
-				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
-				  lar_enabled);
+				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant);
 }
 
 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
@@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
 
 static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
 {
-	struct iwl_mvm_rss_sync_notif notif = {
-		.metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
-		.metadata.sync = 0,
-		.nssn_sync.baid = baid,
-		.nssn_sync.nssn = nssn,
-	};
+	if (IWL_MVM_USE_NSSN_SYNC) {
+		struct iwl_mvm_rss_sync_notif notif = {
+			.metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+			.metadata.sync = 0,
+			.nssn_sync.baid = baid,
+			.nssn_sync.nssn = nssn,
+		};
 
-	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+		iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif,
+						sizeof(notif));
+	}
 }
 
 #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
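Gating the body with IWL_MVM_USE_NSSN_SYNC (defined as 0 in constants.h above) keeps the code compiled and type-checked while letting the optimizer drop it entirely, the usual kernel alternative to #ifdef. A small standalone illustration of the idiom (all names are made up):

#include <stdio.h>

#define USE_FEATURE 0	/* flip to 1 to re-enable the code path */

static void expensive_sync(int id)
{
	printf("syncing %d\n", id);
}

int main(void)
{
	/*
	 * Unlike "#if USE_FEATURE", the body below still has to compile,
	 * so it can't silently rot; with USE_FEATURE == 0 the compiler
	 * eliminates both the branch and the call.
	 */
	if (USE_FEATURE)
		expensive_sync(42);
	return 0;
}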
@@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
 		cmd_size = sizeof(struct iwl_scan_config_v2);
 	else
 		cmd_size = sizeof(struct iwl_scan_config_v1);
-	cmd_size += num_channels;
+	cmd_size += mvm->fw->ucode_capa.n_scan_channels;
 
 	cfg = kzalloc(cmd_size, GFP_KERNEL);
 	if (!cfg)
@@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
 /*
 * Allocates and sets the Tx cmd the driver data pointers in the skb
 */
-static struct iwl_device_cmd *
+static struct iwl_device_tx_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 		      struct ieee80211_tx_info *info, int hdrlen,
 		      struct ieee80211_sta *sta, u8 sta_id)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	struct iwl_tx_cmd *tx_cmd;
 
 	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);

@@ -504,11 +504,6 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (unlikely(!dev_cmd))
 		return NULL;
 
-	/* Make sure we zero enough of dev_cmd */
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
-
-	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
 	dev_cmd->hdr.cmd = TX_CMD;
 
 	if (iwl_mvm_has_new_tx_api(mvm)) {

@@ -597,7 +592,7 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 }
 
 static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
-				       struct iwl_device_cmd *cmd)
+				       struct iwl_device_tx_cmd *cmd)
 {
 	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
 

@@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info info;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	u8 sta_id;
 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 	__le16 fc = hdr->frame_control;

@@ -1078,7 +1073,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct iwl_mvm_sta *mvmsta;
-	struct iwl_device_cmd *dev_cmd;
+	struct iwl_device_tx_cmd *dev_cmd;
 	__le16 fc;
 	u16 seq_number = 0;
 	u8 tid = IWL_MAX_TID_COUNT;

@@ -1154,7 +1149,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
 		spin_unlock(&mvmsta->lock);
-		return 0;
+		return -1;
 	}
 
 	if (!iwl_mvm_has_new_tx_api(mvm)) {

@@ -1206,8 +1201,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
 	return -1;
 }
 
-int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
-		   struct ieee80211_sta *sta)
+int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
+		       struct ieee80211_sta *sta)
 {
 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
 	struct ieee80211_tx_info info;
@@ -57,6 +57,42 @@
 #include "internal.h"
 #include "iwl-prph.h"
 
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+						    size_t size,
+						    dma_addr_t *phys,
+						    int depth)
+{
+	void *result;
+
+	if (WARN(depth > 2,
+		 "failed to allocate DMA memory not crossing 2^32 boundary"))
+		return NULL;
+
+	result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+	if (!result)
+		return NULL;
+
+	if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+		void *old = result;
+		dma_addr_t oldphys = *phys;
+
+		result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+								phys,
+								depth + 1);
+		dma_free_coherent(trans->dev, size, old, oldphys);
+	}
+
+	return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+						   size_t size,
+						   dma_addr_t *phys)
+{
+	return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 {
 	struct iwl_self_init_dram *dram = &trans->init_dram;

@@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
 	struct iwl_context_info *ctxt_info;
 	struct iwl_context_info_rbd_cfg *rx_cfg;
 	u32 control_flags = 0, rb_size;
+	dma_addr_t phys;
 	int ret;
 
-	ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
-				       &trans_pcie->ctxt_info_dma_addr,
-				       GFP_KERNEL);
+	ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+							  sizeof(*ctxt_info),
+							  &phys);
 	if (!ctxt_info)
 		return -ENOMEM;
 
+	trans_pcie->ctxt_info_dma_addr = phys;
+
 	ctxt_info->version.version = 0;
 	ctxt_info->version.mac_id =
 		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
@@ -305,7 +305,7 @@ struct iwl_cmd_meta {
 #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
 
 struct iwl_pcie_txq_entry {
-	struct iwl_device_cmd *cmd;
+	void *cmd;
 	struct sk_buff *skb;
 	/* buffer to free after command completes */
 	const void *free_buf;

@@ -672,6 +672,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
 /*****************************************************
 * TX / HCMD
 ******************************************************/
+/*
+ * We need this inline in case dma_addr_t is only 32-bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition in 64-bit.
+ */
+static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
+{
+	return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
 int iwl_pcie_tx_init(struct iwl_trans *trans);
 int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
 			  int queue_size);

@@ -688,7 +698,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
 				  struct iwl_txq *txq);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		      struct iwl_device_cmd *dev_cmd, int txq_id);
+		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx);

@@ -1082,7 +1092,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans);
 void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
 			    struct sk_buff *skb);
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+				      struct sk_buff *skb);
 #endif
 
 /* common functions that are used by gen3 transport */

@@ -1106,7 +1117,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
 			 unsigned int timeout);
 void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-			   struct iwl_device_cmd *dev_cmd, int txq_id);
+			   struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
 				  struct iwl_host_cmd *cmd);
 void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
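The helper works because a buffer stays within one 2^32-byte region exactly when its start and end addresses share the same upper 32 bits; taking phys as u64 makes phys + len a 64-bit addition even where dma_addr_t is 32-bit. A runnable userspace check of the same arithmetic (the values are just examples):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))

static int crosses_4g_boundary(uint64_t phys, uint16_t len)
{
	/* phys is u64 so the addition cannot wrap at 2^32 */
	return upper_32_bits(phys) != upper_32_bits(phys + len);
}

int main(void)
{
	assert(!crosses_4g_boundary(0x00000000fffff000ULL, 0xfff));
	assert(crosses_4g_boundary(0x00000000fffff000ULL, 0x1000));
	assert(!crosses_4g_boundary(0x0000000100000000ULL, 0x1000));
	printf("boundary checks OK\n");
	return 0;
}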
@@ -79,6 +79,7 @@
 #include "iwl-agn-hw.h"
 #include "fw/error-dump.h"
 #include "fw/dbg.h"
+#include "fw/api/tx.h"
 #include "internal.h"
 #include "iwl-fh.h"
 

@@ -301,18 +302,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans)
 	u16 cap;
 
 	/*
-	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
-	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
-	 * If so (likely), disable L0S, so device moves directly L0->L1;
-	 *    costs negligible amount of power savings.
-	 * If not (unlikely), enable L0S, so there is at least some
-	 *    power savings, even without L1.
+	 * L0S states have been found to be unstable with our devices
+	 * and in newer hardware they are not officially supported at
+	 * all, so we must always set the L0S_DISABLED bit.
 	 */
+	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
+
 	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
-		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-	else
-		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
 	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
 
 	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);

@@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 {
 	struct iwl_trans_pcie *trans_pcie;
 	struct iwl_trans *trans;
-	int ret, addr_size;
+	int ret, addr_size, txcmd_size, txcmd_align;
+	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
+
+	if (!cfg_trans->gen2) {
+		ops = &trans_ops_pcie;
+		txcmd_size = sizeof(struct iwl_tx_cmd);
+		txcmd_align = sizeof(void *);
+	} else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+		txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+		txcmd_align = 64;
+	} else {
+		txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+		txcmd_align = 128;
+	}
+
+	txcmd_size += sizeof(struct iwl_cmd_header);
+	txcmd_size += 36; /* biggest possible 802.11 header */
+
+	/* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+	if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+		return ERR_PTR(-EINVAL);
 
 	ret = pcim_enable_device(pdev);
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (cfg_trans->gen2)
-		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-					&pdev->dev, &trans_ops_pcie_gen2);
-	else
-		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
-					&pdev->dev, &trans_ops_pcie);
+	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+				txcmd_size, txcmd_align);
 	if (!trans)
 		return ERR_PTR(-ENOMEM);
 
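The WARN_ON encodes the invariant the slab alignment relies on: if every command buffer starts at a multiple of txcmd_align and is shorter than txcmd_align, it cannot straddle any txcmd_align boundary, and therefore no page or 2^32 boundary either, since those are multiples of the alignment (assuming, as kmem_cache guarantees, that the allocator honors the requested alignment). A small sketch of that arithmetic with illustrative sizes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t align = 128;	/* e.g. the gen3 txcmd_align */
	const uint64_t size = 100;	/* must be <= align for the proof */

	for (uint64_t base = 0; base < 4096; base += align) {
		/* first and last byte land in the same align-sized slot */
		assert(base / align == (base + size - 1) / align);
	}
	printf("no %llu-byte object crosses a %llu-byte boundary\n",
	       (unsigned long long)size, (unsigned long long)align);
	return 0;
}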
@@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
 	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
 	struct iwl_tfh_tb *tb;
 
+	/*
+	 * Only WARN here so we know about the issue, but we mess up our
+	 * unmap path because not every place currently checks for errors
+	 * returned from this function - it can only return an error if
+	 * there's no more space, and so when we know there is enough we
+	 * don't always check ...
+	 */
+	WARN(iwl_pcie_crosses_4g_boundary(addr, len),
+	     "possible DMA problem with iova:0x%llx, len:%d\n",
+	     (unsigned long long)addr, len);
+
 	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
 		return -EINVAL;
 	tb = &tfd->tbs[idx];

@@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
 	return idx;
 }
 
+static struct page *get_workaround_page(struct iwl_trans *trans,
+					struct sk_buff *skb)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct page **page_ptr;
+	struct page *ret;
+
+	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+	ret = alloc_page(GFP_ATOMIC);
+	if (!ret)
+		return NULL;
+
+	/* set the chaining pointer to the previous page if there */
+	*(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+	*page_ptr = ret;
+
+	return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
+					struct sk_buff *skb,
+					struct iwl_tfh_tfd *tfd,
+					dma_addr_t phys, void *virt,
+					u16 len, struct iwl_cmd_meta *meta)
+{
+	dma_addr_t oldphys = phys;
+	struct page *page;
+	int ret;
+
+	if (unlikely(dma_mapping_error(trans->dev, phys)))
+		return -ENOMEM;
+
+	if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
+		ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+
+		if (ret < 0)
+			goto unmap;
+
+		if (meta)
+			meta->tbs |= BIT(ret);
+
+		ret = 0;
+		goto trace;
+	}
+
+	/*
+	 * Work around a hardware bug. If (as expressed in the
+	 * condition above) the TB ends on a 32-bit boundary,
+	 * then the next TB may be accessed with the wrong
+	 * address.
+	 * To work around it, copy the data elsewhere and make
+	 * a new mapping for it so the device will not fail.
+	 */
+
+	if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+		ret = -ENOBUFS;
+		goto unmap;
+	}
+
+	page = get_workaround_page(trans, skb);
+	if (!page) {
+		ret = -ENOMEM;
+		goto unmap;
+	}
+
+	memcpy(page_address(page), virt, len);
+
+	phys = dma_map_single(trans->dev, page_address(page), len,
+			      DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, phys)))
+		return -ENOMEM;
+	ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
+	if (ret < 0) {
+		/* unmap the new allocation as single */
+		oldphys = phys;
+		meta = NULL;
+		goto unmap;
+	}
+	IWL_WARN(trans,
+		 "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+		 len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+	ret = 0;
+unmap:
+	if (meta)
+		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+	else
+		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+	return ret;
+}
+
 static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 				     struct sk_buff *skb,
 				     struct iwl_tfh_tfd *tfd, int start_len,
-				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+				     u8 hdr_len,
+				     struct iwl_device_tx_cmd *dev_cmd)
 {
 #ifdef CONFIG_INET
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;

@@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 	u16 length, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
-	struct page **page_ptr;
 	struct tso_t tso;
 
 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),

@@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
 
 	/* Our device supports 9 segments at most, it will fit in 1 page */
-	hdr_page = get_page_hdr(trans, hdr_room);
+	hdr_page = get_page_hdr(trans, hdr_room, skb);
 	if (!hdr_page)
 		return -ENOMEM;
 
-	get_page(hdr_page->page);
 	start_hdr = hdr_page->pos;
-	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-	*page_ptr = hdr_page->page;
 
 	/*
 	 * Pull the ieee80211 header to be able to use TSO core,

@@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 			dev_kfree_skb(csum_skb);
 			goto out_err;
 		}
+		/*
+		 * No need for _with_wa, this is from the TSO page and
+		 * we leave some space at the end of it so can't hit
+		 * the buggy scenario.
+		 */
 		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
 					tb_phys, tb_len);

@@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 
 		/* put the payload */
 		while (data_left) {
+			int ret;
+
 			tb_len = min_t(unsigned int, tso.size, data_left);
 			tb_phys = dma_map_single(trans->dev, tso.data,
 						 tb_len, DMA_TO_DEVICE);
-			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
+							   tb_phys, tso.data,
+							   tb_len, NULL);
+			if (ret) {
 				dev_kfree_skb(csum_skb);
 				goto out_err;
 			}
-			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
 			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
 						tb_phys, tb_len);
 
 			data_left -= tb_len;
 			tso_build_data(skb, &tso, tb_len);

@@ -372,7 +487,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
 					  struct iwl_txq *txq,
-					  struct iwl_device_cmd *dev_cmd,
+					  struct iwl_device_tx_cmd *dev_cmd,
 					  struct sk_buff *skb,
 					  struct iwl_cmd_meta *out_meta,
 					  int hdr_len,

@@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
 
 	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 
+	/*
+	 * No need for _with_wa, the first TB allocation is aligned up
+	 * to a 64-byte boundary and thus can't be at the end or cross
+	 * a page boundary (much less a 2^32 boundary).
+	 */
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
 	/*

@@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
 	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 		goto out_err;
+	/*
+	 * No need for _with_wa(), we ensure (via alignment) that the data
+	 * here can never cross or end at a page boundary.
+	 */
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
 
 	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,

@@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		dma_addr_t tb_phys;
-		int tb_idx;
+		unsigned int fragsz = skb_frag_size(frag);
+		int ret;
 
-		if (!skb_frag_size(frag))
+		if (!fragsz)
 			continue;
 
 		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
-					   skb_frag_size(frag), DMA_TO_DEVICE);
-
-		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-			return -ENOMEM;
-		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
-					      skb_frag_size(frag));
-		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
-					tb_phys, skb_frag_size(frag));
-		if (tb_idx < 0)
-			return tb_idx;
-
-		out_meta->tbs |= BIT(tb_idx);
+					   fragsz, DMA_TO_DEVICE);
+		ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+						   skb_frag_address(frag),
+						   fragsz, out_meta);
+		if (ret)
+			return ret;
 	}
 
 	return 0;

@@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
 static struct
 iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 				    struct iwl_txq *txq,
-				    struct iwl_device_cmd *dev_cmd,
+				    struct iwl_device_tx_cmd *dev_cmd,
 				    struct sk_buff *skb,
 				    struct iwl_cmd_meta *out_meta,
 				    int hdr_len,

@@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 	/* The first TB points to bi-directional DMA data */
 	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
 
+	/*
+	 * No need for _with_wa, the first TB allocation is aligned up
+	 * to a 64-byte boundary and thus can't be at the end or cross
+	 * a page boundary (much less a 2^32 boundary).
+	 */
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
 	/*

@@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 		goto out_err;
+	/*
+	 * No need for _with_wa(), we ensure (via alignment) that the data
+	 * here can never cross or end at a page boundary.
+	 */
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
 	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
 			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

@@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 	tb2_len = skb_headlen(skb) - hdr_len;
 
 	if (tb2_len > 0) {
+		int ret;
+
 		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
 					 tb2_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+		ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+						   skb->data + hdr_len, tb2_len,
+						   NULL);
+		if (ret)
 			goto out_err;
-		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
-		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
-					tb_phys, tb2_len);
 	}
 
 	if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
 		goto out_err;
 
 	skb_walk_frags(skb, frag) {
+		int ret;
+
 		tb_phys = dma_map_single(trans->dev, frag->data,
 					 skb_headlen(frag), DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+		ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+						   frag->data,
+						   skb_headlen(frag), NULL);
+		if (ret)
 			goto out_err;
-		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag));
-		trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data,
-					tb_phys, skb_headlen(frag));
 		if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
 			goto out_err;
 	}

@@ -538,7 +670,7 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
 static
 struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 					    struct iwl_txq *txq,
-					    struct iwl_device_cmd *dev_cmd,
+					    struct iwl_device_tx_cmd *dev_cmd,
 					    struct sk_buff *skb,
 					    struct iwl_cmd_meta *out_meta)
 {

@@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 }
 
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-			   struct iwl_device_cmd *dev_cmd, int txq_id)
+			   struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_cmd_meta *out_meta;

@@ -603,7 +735,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* don't put the packet on the ring, if there is no room */
 	if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-		struct iwl_device_cmd **dev_cmd_ptr;
+		struct iwl_device_tx_cmd **dev_cmd_ptr;
 
 		dev_cmd_ptr = (void *)((u8 *)skb->cb +
 				       trans_pcie->dev_cmd_offs);
@@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 	u8 sec_ctl = 0;
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
-	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[txq->write_ptr].cmd->payload;
+	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 	u8 sta_id = tx_cmd->sta_id;
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

@@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 	int read_ptr = txq->read_ptr;
 	u8 sta_id = 0;
 	__le16 bc_ent;
-	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[read_ptr].cmd->payload;
+	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 

@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
 			    struct sk_buff *skb)
 {
 	struct page **page_ptr;
+	struct page *next;
 
 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+	next = *page_ptr;
+	*page_ptr = NULL;
 
-	if (*page_ptr) {
-		__free_page(*page_ptr);
-		*page_ptr = NULL;
+	while (next) {
+		struct page *tmp = next;
+
+		next = *(void **)(page_address(next) + PAGE_SIZE -
+				  sizeof(void *));
+		__free_page(tmp);
 	}
 }
 

@@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		while (!skb_queue_empty(&overflow_skbs)) {
 			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
-			struct iwl_device_cmd *dev_cmd_ptr;
+			struct iwl_device_tx_cmd *dev_cmd_ptr;
 
 			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
 						 trans_pcie->dev_cmd_offs);

@@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 }
 
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+				      struct sk_buff *skb)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+	struct page **page_ptr;
+
+	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+	if (WARN_ON(*page_ptr))
+		return NULL;
 
 	if (!p->page)
 		goto alloc;
 
-	/* enough room on this page */
-	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
-		return p;
+	/*
+	 * Check if there's enough room on this page
+	 *
+	 * Note that we put a page chaining pointer *last* in the
+	 * page - we need it somewhere, and if it's there then we
+	 * avoid DMA mapping the last bits of the page which may
+	 * trigger the 32-bit boundary hardware bug.
+	 *
+	 * (see also get_workaround_page() in tx-gen2.c)
+	 */
+	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+			   sizeof(void *))
+		goto out;
 
 	/* We don't have enough room on this page, get a new one. */
 	__free_page(p->page);

@@ -2072,6 +2095,11 @@ struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
 	if (!p->page)
 		return NULL;
 	p->pos = page_address(p->page);
+	/* set the chaining pointer to NULL */
+	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+	*page_ptr = p->page;
+	get_page(p->page);
 	return p;
 }
 

@@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				   struct iwl_txq *txq, u8 hdr_len,
 				   struct iwl_cmd_meta *out_meta,
-				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+				   struct iwl_device_tx_cmd *dev_cmd,
+				   u16 tb1_len)
 {
 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;

@@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	u16 length, iv_len, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
-	struct page **page_ptr;
 	struct tso_t tso;
 
 	/* if the packet is protected, then it must be CCMP or GCMP */

@@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 
 	/* Our device supports 9 segments at most, it will fit in 1 page */
-	hdr_page = get_page_hdr(trans, hdr_room);
+	hdr_page = get_page_hdr(trans, hdr_room, skb);
 	if (!hdr_page)
 		return -ENOMEM;
 
-	get_page(hdr_page->page);
 	start_hdr = hdr_page->pos;
-	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-	*page_ptr = hdr_page->page;
 	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
 	hdr_page->pos += iv_len;
 

@@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				   struct iwl_txq *txq, u8 hdr_len,
 				   struct iwl_cmd_meta *out_meta,
-				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+				   struct iwl_device_tx_cmd *dev_cmd,
+				   u16 tb1_len)
 {
 	/* No A-MSDU without CONFIG_INET */
 	WARN_ON(1);

@@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 #endif /* CONFIG_INET */
 
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		      struct iwl_device_cmd *dev_cmd, int txq_id)
+		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct ieee80211_hdr *hdr;

@@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* don't put the packet on the ring, if there is no room */
 	if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-		struct iwl_device_cmd **dev_cmd_ptr;
+		struct iwl_device_tx_cmd **dev_cmd_ptr;
 
 		dev_cmd_ptr = (void *)((u8 *)skb->cb +
 				       trans_pcie->dev_cmd_offs);
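The TSO path above now reserves the final sizeof(void *) bytes of each header page for a chain pointer, so iwl_pcie_free_tso_page() can walk and free every page hanging off the skb. A userspace model of the same intrusive chain (the page size and helper names are illustrative, not driver code):

#include <stdio.h>
#include <stdlib.h>

#define PG_SIZE 4096
#define NEXT_PTR(pg) (*(void **)((char *)(pg) + PG_SIZE - sizeof(void *)))

static void *chain_new_page(void **head)
{
	void *pg = malloc(PG_SIZE);

	if (!pg)
		return NULL;
	NEXT_PTR(pg) = *head;	/* link to the previous page, if any */
	*head = pg;
	return pg;
}

static void free_chain(void **head)
{
	void *next = *head;

	*head = NULL;
	while (next) {
		void *tmp = next;

		next = NEXT_PTR(tmp);	/* read the link before freeing */
		free(tmp);
	}
}

int main(void)
{
	void *head = NULL;
	int i;

	for (i = 0; i < 3; i++)
		chain_new_page(&head);
	free_chain(&head);
	printf("chain freed\n");
	return 0;
}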
@@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates)
 	int hw, ap, ap_max = ie[1];
 	u8 hw_rate;
 
+	if (ap_max > MAX_RATES) {
+		lbs_deb_assoc("invalid rates\n");
+		return tlv;
+	}
 	/* Advance past IE header */
 	ie += 2;
 

@@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
 	struct cmd_ds_802_11_ad_hoc_join cmd;
 	u8 preamble = RADIO_PREAMBLE_SHORT;
 	int ret = 0;
+	int hw, i;
+	u8 rates_max;
+	u8 *rates;
 
 	/* TODO: set preamble based on scan result */
 	ret = lbs_set_radio(priv, preamble, 1);

@@ -1775,9 +1782,12 @@ static int lbs_ibss_join_existing(struct lbs_private *priv,
 	if (!rates_eid) {
 		lbs_add_rates(cmd.bss.rates);
 	} else {
-		int hw, i;
-		u8 rates_max = rates_eid[1];
-		u8 *rates = cmd.bss.rates;
+		rates_max = rates_eid[1];
+		if (rates_max > MAX_RATES) {
+			lbs_deb_join("invalid rates");
+			goto out;
+		}
+		rates = cmd.bss.rates;
 		for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) {
 			u8 hw_rate = lbs_rates[hw].bitrate / 5;
 			for (i = 0; i < rates_max; i++) {
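Both libertas fixes validate a rates count read from an information element before using it as a loop bound, since the length byte comes from the peer and can exceed the destination buffer. A minimal sketch of the pattern (MAX_RATES and the buffers are illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_RATES 14

static int copy_rates(unsigned char *dst, const unsigned char *ie)
{
	unsigned char n = ie[1];	/* length byte supplied by the peer */

	if (n > MAX_RATES) {
		fprintf(stderr, "invalid rates\n");
		return -1;		/* reject instead of overflowing dst */
	}
	memcpy(dst, ie + 2, n);
	return n;
}

int main(void)
{
	unsigned char rates[MAX_RATES];
	const unsigned char good[] = { 0x01, 4, 0x02, 0x04, 0x0b, 0x16 };
	const unsigned char evil[] = { 0x01, 200 };	/* bogus length */

	printf("good: %d rates copied\n", copy_rates(rates, good));
	printf("evil: %d\n", copy_rates(rates, evil));
	return 0;
}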
@@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status,
 		return 0;
 
 	sband = dev->hw->wiphy->bands[status->band];
-	if (!sband || status->rate_idx > sband->n_bitrates)
+	if (!sband || status->rate_idx >= sband->n_bitrates)
 		return 0;
 
 	rate = &sband->bitrates[status->rate_idx];
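The mt76 change is a classic off-by-one: the last valid index into an n_bitrates-element array is n_bitrates - 1, so the guard has to reject rate_idx >= n_bitrates, not only rate_idx > n_bitrates. A tiny sketch (the count is illustrative):

#include <stdio.h>

#define N_BITRATES 12

static int rate_idx_ok(int idx)
{
	return idx >= 0 && idx < N_BITRATES;	/* i.e. reject idx >= n */
}

int main(void)
{
	printf("idx 11 ok? %d\n", rate_idx_ok(11));	/* 1: last element */
	printf("idx 12 ok? %d\n", rate_idx_ok(12));	/* 0: one past end */
	return 0;
}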
@@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev)
 {
 	struct ieee80211_hw *hw = dev->hw;
 
-	mt76_led_cleanup(dev);
+	if (IS_ENABLED(CONFIG_MT76_LEDS))
+		mt76_led_cleanup(dev);
 	mt76_tx_status_check(dev, NULL, true);
 	ieee80211_unregister_hw(hw);
 }