Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

John W. Linville 2013-08-16 14:48:40 -04:00
commit 24de851b79
20 changed files with 811 additions and 715 deletions

View File

@@ -22,6 +22,8 @@ config IWLWIFI
 Intel Wireless WiFi Link 6150BGN 2 Adapter
 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
 Intel 2000 Series Wi-Fi Adapters
+Intel 7260 Wi-Fi Adapter
+Intel 3160 Wi-Fi Adapter
 This driver uses the kernel's mac80211 subsystem.
@@ -46,17 +48,16 @@ config IWLDVM
 depends on IWLWIFI
 default IWLWIFI
 help
-This is the driver supporting the DVM firmware which is
-currently the only firmware available for existing devices.
+This is the driver that supports the DVM firmware which is
+used by most existing devices (with the exception of 7260
+and 3160).
 config IWLMVM
 tristate "Intel Wireless WiFi MVM Firmware support"
 depends on IWLWIFI
 help
-This is the driver supporting the MVM firmware which is
-currently only available for 7000 series devices.
-Say yes if you have such a device.
+This is the driver that supports the MVM firmware which is
+currently only available for 7260 and 3160 devices.
 # don't call it _MODULE -- will confuse Kconfig/fixdep/...
 config IWLWIFI_OPMODE_MODULAR

View File

@@ -145,6 +145,7 @@ do { \
 #define IWL_DL_RX 0x01000000
 #define IWL_DL_ISR 0x02000000
 #define IWL_DL_HT 0x04000000
+#define IWL_DL_EXTERNAL 0x08000000
 /* 0xF0000000 - 0x10000000 */
 #define IWL_DL_11H 0x10000000
 #define IWL_DL_STATS 0x20000000
@@ -153,6 +154,7 @@ do { \
 #define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
 #define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
 #define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
 #define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
 #define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
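
IWL_DL_EXTERNAL adds one more bit to the driver's debug-level bitmask, and IWL_DEBUG_EXTERNAL() is the matching wrapper that only prints when that bit is enabled at runtime. A minimal, self-contained sketch of this bitmask-gated logging pattern (all names and values below are illustrative, not the driver's actual implementation):

    #include <stdio.h>

    /* Hypothetical debug categories, one bit each (mirrors the IWL_DL_* idea). */
    #define DBG_RX       0x01000000u
    #define DBG_ISR      0x02000000u
    #define DBG_EXTERNAL 0x08000000u

    /* Runtime-selected mask; categories whose bit is set produce output. */
    static unsigned int debug_mask = DBG_RX | DBG_EXTERNAL;

    /* Category-specific wrappers expand to a single gated printf. */
    #define DBG_PRINT(level, fmt, ...) \
            do { \
                    if (debug_mask & (level)) \
                            printf("[%#010x] " fmt, (level), ##__VA_ARGS__); \
            } while (0)

    #define DEBUG_EXTERNAL(fmt, ...) DBG_PRINT(DBG_EXTERNAL, fmt, ##__VA_ARGS__)
    #define DEBUG_ISR(fmt, ...)      DBG_PRINT(DBG_ISR, fmt, ##__VA_ARGS__)

    int main(void)
    {
            DEBUG_EXTERNAL("visible: bit is set in debug_mask\n");
            DEBUG_ISR("suppressed: bit is clear in debug_mask\n");
            return 0;
    }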

View File

@@ -843,7 +843,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 int i;
 bool load_module = false;
-fw->ucode_capa.max_probe_length = 200;
+fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
 fw->ucode_capa.standard_phy_calibration_size =
 IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
@@ -1032,8 +1032,10 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
 int ret;
 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
-if (!drv)
-return NULL;
+if (!drv) {
+ret = -ENOMEM;
+goto err;
+}
 drv->trans = trans;
 drv->dev = trans->dev;
@@ -1078,7 +1080,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
 err_free_drv:
 #endif
 kfree(drv);
+err:
 return ERR_PTR(ret);
 }
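
The iwl_drv_start() hunk above converts the allocation-failure path from returning a bare NULL to funnelling every failure through a common err label that returns ERR_PTR(ret). A hedged userspace sketch of that kernel idiom, with ERR_PTR()/IS_ERR()-style helpers re-implemented here only for illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Userspace stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers. */
    static inline void *err_ptr(long err) { return (void *)err; }
    static inline long ptr_err(const void *p) { return (long)p; }
    static inline int is_err(const void *p) { return (unsigned long)p >= (unsigned long)-4095; }

    struct drv { int dummy; };

    /* All failure paths funnel through one label and report a real errno. */
    static struct drv *drv_start(int fail_alloc)
    {
            struct drv *drv;
            int ret;

            drv = fail_alloc ? NULL : calloc(1, sizeof(*drv));
            if (!drv) {
                    ret = -ENOMEM;
                    goto err;
            }
            return drv;

    err:
            return err_ptr(ret);
    }

    int main(void)
    {
            struct drv *d = drv_start(1);

            if (is_err(d))
                    printf("drv_start failed: %ld\n", ptr_err(d));
            else
                    free(d);
            return 0;
    }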

View File

@@ -76,6 +76,7 @@
 * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
 * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
 * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
+* @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
 * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
 * (rather than two) IPv6 addresses
 * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
@@ -88,6 +89,7 @@ enum iwl_ucode_tlv_flag {
 IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
 IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
 IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
+IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
 IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
 IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
 };
@@ -97,6 +99,9 @@ enum iwl_ucode_tlv_flag {
 #define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
 #define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
+/* The default max probe length if not specified by the firmware file */
+#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
 /**
 * enum iwl_ucode_type
 *

View File

@@ -67,5 +67,14 @@
 #define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
 #define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
+#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
+#define IWL_MVM_PS_SNOOZE_INTERVAL 25
+#define IWL_MVM_PS_SNOOZE_WINDOW 50
+#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
 #endif /* __MVM_CONSTANTS_H */
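
These new constants are the defaults that the power code (see the iwl_mvm_power_build_cmd hunk further down) copies into the MAC power command when uAPSD is enabled. A simplified, self-contained sketch of how they land in the command; the struct below is a stand-in with plain integer fields and omits the driver's endianness conversions and flag handling:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_MSEC 1000
    #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
    #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
    #define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
    #define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
    #define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
    #define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
    #define IWL_MVM_PS_SNOOZE_INTERVAL 25
    #define IWL_MVM_PS_SNOOZE_WINDOW 50
    #define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25

    /* Simplified stand-in for the uAPSD-related part of iwl_mac_power_cmd. */
    struct mac_power_cmd {
            uint32_t rx_data_timeout_uapsd;
            uint32_t tx_data_timeout_uapsd;
            uint16_t snooze_interval;
            uint16_t snooze_window;
            uint8_t heavy_tx_thld_packets;
            uint8_t heavy_rx_thld_packets;
            uint8_t heavy_tx_thld_percentage;
            uint8_t heavy_rx_thld_percentage;
    };

    static void fill_uapsd_defaults(struct mac_power_cmd *cmd, bool wowlan)
    {
            cmd->rx_data_timeout_uapsd = IWL_MVM_UAPSD_RX_DATA_TIMEOUT;
            cmd->tx_data_timeout_uapsd = IWL_MVM_UAPSD_TX_DATA_TIMEOUT;
            cmd->heavy_tx_thld_packets = IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
            cmd->heavy_rx_thld_packets = IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
            cmd->heavy_tx_thld_percentage = IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
            cmd->heavy_rx_thld_percentage = IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
            cmd->snooze_interval = IWL_MVM_PS_SNOOZE_INTERVAL;
            cmd->snooze_window = wowlan ? IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW
                                        : IWL_MVM_PS_SNOOZE_WINDOW;
    }

    int main(void)
    {
            struct mac_power_cmd cmd = {0};

            fill_uapsd_defaults(&cmd, false);
            printf("uAPSD rx timeout: %u usec, snooze window: %u\n",
                   (unsigned)cmd.rx_data_timeout_uapsd, cmd.snooze_window);
            return 0;
    }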

View File

@@ -1109,73 +1109,16 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 return __iwl_mvm_suspend(hw, wowlan, false);
 }
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
-struct ieee80211_vif *vif)
+static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
+struct ieee80211_vif *vif,
+struct iwl_wowlan_status *status)
 {
-u32 base = mvm->error_event_table;
-struct error_table_start {
-/* cf. struct iwl_error_event_table */
-u32 valid;
-u32 error_id;
-} err_info;
+struct sk_buff *pkt = NULL;
 struct cfg80211_wowlan_wakeup wakeup = {
 .pattern_idx = -1,
 };
 struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
-struct iwl_host_cmd cmd = {
-.id = WOWLAN_GET_STATUSES,
-.flags = CMD_SYNC | CMD_WANT_SKB,
-};
-struct iwl_wowlan_status *status;
-u32 reasons;
-int ret, len;
-struct sk_buff *pkt = NULL;
-iwl_trans_read_mem_bytes(mvm->trans, base,
-&err_info, sizeof(err_info));
-if (err_info.valid) {
-IWL_INFO(mvm, "error table is valid (%d)\n",
-err_info.valid);
-if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
-wakeup.rfkill_release = true;
-ieee80211_report_wowlan_wakeup(vif, &wakeup,
-GFP_KERNEL);
-}
-return;
-}
-/* only for tracing for now */
-ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
-if (ret)
-IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
-ret = iwl_mvm_send_cmd(mvm, &cmd);
-if (ret) {
-IWL_ERR(mvm, "failed to query status (%d)\n", ret);
-return;
-}
-/* RF-kill already asserted again... */
-if (!cmd.resp_pkt)
-return;
-len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
-if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
-IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-goto out;
-}
-status = (void *)cmd.resp_pkt->data;
-if (len - sizeof(struct iwl_cmd_header) !=
-sizeof(*status) +
-ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
-IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
-goto out;
-}
-reasons = le32_to_cpu(status->wakeup_reasons);
+u32 reasons = le32_to_cpu(status->wakeup_reasons);
 if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
 wakeup_report = NULL;
@@ -1238,6 +1181,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 pktsize -= hdrlen;
 if (ieee80211_has_protected(hdr->frame_control)) {
+/*
+* This is unlocked and using gtk_i(c)vlen,
+* but since everything is under RTNL still
+* that's not really a problem - changing
+* it would be difficult.
+*/
 if (is_multicast_ether_addr(hdr->addr1)) {
 ivlen = mvm->gtk_ivlen;
 icvlen += mvm->gtk_icvlen;
@@ -1288,9 +1237,82 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
 report:
 ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
 kfree_skb(pkt);
+}
-out:
+/* releases the MVM mutex */
+static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+struct ieee80211_vif *vif)
+{
+u32 base = mvm->error_event_table;
+struct error_table_start {
+/* cf. struct iwl_error_event_table */
+u32 valid;
+u32 error_id;
+} err_info;
+struct iwl_host_cmd cmd = {
+.id = WOWLAN_GET_STATUSES,
+.flags = CMD_SYNC | CMD_WANT_SKB,
+};
+struct iwl_wowlan_status *status;
+int ret, len;
+iwl_trans_read_mem_bytes(mvm->trans, base,
+&err_info, sizeof(err_info));
+if (err_info.valid) {
+IWL_INFO(mvm, "error table is valid (%d)\n",
+err_info.valid);
+if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+struct cfg80211_wowlan_wakeup wakeup = {
+.rfkill_release = true,
+};
+ieee80211_report_wowlan_wakeup(vif, &wakeup,
+GFP_KERNEL);
+}
+goto out_unlock;
+}
+/* only for tracing for now */
+ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+if (ret)
+IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+ret = iwl_mvm_send_cmd(mvm, &cmd);
+if (ret) {
+IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+goto out_unlock;
+}
+/* RF-kill already asserted again... */
+if (!cmd.resp_pkt)
+goto out_unlock;
+len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+goto out_free_resp;
+}
+status = (void *)cmd.resp_pkt->data;
+if (len - sizeof(struct iwl_cmd_header) !=
+sizeof(*status) +
+ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+goto out_free_resp;
+}
+/* now we have all the data we need, unlock to avoid mac80211 issues */
+mutex_unlock(&mvm->mutex);
+iwl_mvm_report_wakeup_reasons(mvm, vif, status);
 iwl_free_resp(&cmd);
+return;
+out_free_resp:
+iwl_free_resp(&cmd);
+out_unlock:
+mutex_unlock(&mvm->mutex);
 }
 static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1347,10 +1369,13 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 iwl_mvm_read_d3_sram(mvm);
 iwl_mvm_query_wakeup_reasons(mvm, vif);
+/* has unlocked the mutex, so skip that */
+goto out;
 out_unlock:
 mutex_unlock(&mvm->mutex);
+out:
 if (!test && vif)
 ieee80211_resume_disconnect(vif);
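
The d3.c refactor above gathers the WoWLAN status from the firmware while mvm->mutex is held, then drops the mutex before iwl_mvm_report_wakeup_reasons() calls back into mac80211, avoiding lock-ordering problems with ieee80211_report_wowlan_wakeup(). A minimal userspace sketch of that "collect under the lock, release before the callback" pattern, using pthreads purely for illustration:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static int wakeup_reason; /* stands in for the firmware status */

    /* Callback that may take other locks of its own (as mac80211 does). */
    static void report_wakeup(int reason)
    {
            printf("wakeup reason: %d\n", reason);
    }

    static void query_and_report(void)
    {
            int reason;

            pthread_mutex_lock(&state_lock);
            /* ... query device/firmware state while protected ... */
            reason = wakeup_reason;
            /* Drop the lock before the callback to avoid lock-order issues. */
            pthread_mutex_unlock(&state_lock);

            report_wakeup(reason);
    }

    int main(void)
    {
            wakeup_reason = 42;
            query_and_report();
            return 0;
    }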

View File

@@ -352,6 +352,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
 IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
 dbgfs_pm->lprx_rssi_threshold = val;
 break;
+case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+dbgfs_pm->snooze_ena = val;
+break;
 }
 }
@@ -405,6 +409,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
 POWER_LPRX_RSSI_THRESHOLD_MIN)
 return -EINVAL;
 param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+} else if (!strncmp("snooze_enable=", buf, 14)) {
+if (sscanf(buf + 14, "%d", &val) != 1)
+return -EINVAL;
+param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
 } else {
 return -EINVAL;
 }
@@ -424,7 +432,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
 struct ieee80211_vif *vif = file->private_data;
 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 struct iwl_mvm *mvm = mvmvif->dbgfs_data;
-char buf[256];
+char buf[512];
 int bufsz = sizeof(buf);
 int pos;
@@ -895,10 +903,7 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
 if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
 ret = iwl_mvm_disable_beacon_filter(mvm, vif);
 } else {
-if (mvmvif->bf_enabled)
-ret = iwl_mvm_enable_beacon_filter(mvm, vif);
-else
-ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ret = iwl_mvm_enable_beacon_filter(mvm, vif);
 }
 mutex_unlock(&mvm->mutex);
@@ -923,7 +928,7 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
 };
 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
-if (mvmvif->bf_enabled)
+if (mvmvif->bf_data.bf_enabled)
 cmd.bf_enable_beacon_filter = cpu_to_le32(1);
 else
 cmd.bf_enable_beacon_filter = 0;
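
The pm_params write handler accepts "name=value" strings such as "snooze_enable=1" and dispatches on the prefix with strncmp() before parsing the value with sscanf(). A self-contained sketch of that parsing pattern, stripped of the driver's locking and of the power-command refresh that follows (the enum values here are illustrative):

    #include <stdio.h>
    #include <string.h>

    enum pm_param { PM_NONE, PM_LPRX_RSSI_THRESHOLD, PM_SNOOZE_ENABLE };

    /* Returns the recognised parameter, or PM_NONE, and stores its value. */
    static enum pm_param parse_pm_param(const char *buf, int *val)
    {
            if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
                    if (sscanf(buf + 20, "%d", val) != 1)
                            return PM_NONE;
                    return PM_LPRX_RSSI_THRESHOLD;
            }
            if (!strncmp("snooze_enable=", buf, 14)) {
                    if (sscanf(buf + 14, "%d", val) != 1)
                            return PM_NONE;
                    return PM_SNOOZE_ENABLE;
            }
            return PM_NONE;
    }

    int main(void)
    {
            int val = 0;
            enum pm_param p = parse_pm_param("snooze_enable=1", &val);

            printf("param=%d val=%d\n", p, val);
            return 0;
    }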

View File

@@ -155,8 +155,12 @@ struct iwl_powertable_cmd {
 * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
 * Default: 80dbm
 * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
-* @snooze_interval: TBD
-* @snooze_window: TBD
+* @snooze_interval: Maximum time between attempts to retrieve buffered data
+* from the AP [msec]
+* @snooze_window: A window of time in which PBW snoozing insures that all
+* packets received. It is also the minimum time from last
+* received unicast RX packet, before client stops snoozing
+* for data. [msec]
 * @snooze_step: TBD
 * @qndp_tid: TID client shall use for uAPSD QNDP triggers
 * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
@@ -164,10 +168,10 @@ struct iwl_powertable_cmd {
 * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
 * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
 * values.
-* @heavy_traffic_thr_tx_pkts: TX threshold measured in number of packets
-* @heavy_traffic_thr_rx_pkts: RX threshold measured in number of packets
-* @heavy_traffic_thr_tx_load: TX threshold measured in load's percentage
-* @heavy_traffic_thr_rx_load: RX threshold measured in load's percentage
+* @heavy_tx_thld_packets: TX threshold measured in number of packets
+* @heavy_rx_thld_packets: RX threshold measured in number of packets
+* @heavy_tx_thld_percentage: TX threshold measured in load's percentage
+* @heavy_rx_thld_percentage: RX threshold measured in load's percentage
 * @limited_ps_threshold:
 */
 struct iwl_mac_power_cmd {
@@ -189,10 +193,10 @@ struct iwl_mac_power_cmd {
 u8 qndp_tid;
 u8 uapsd_ac_flags;
 u8 uapsd_max_sp;
-u8 heavy_traffic_threshold_tx_packets;
-u8 heavy_traffic_threshold_rx_packets;
-u8 heavy_traffic_threshold_tx_percentage;
-u8 heavy_traffic_threshold_rx_percentage;
+u8 heavy_tx_thld_packets;
+u8 heavy_rx_thld_packets;
+u8 heavy_tx_thld_percentage;
+u8 heavy_rx_thld_percentage;
 u8 limited_ps_threshold;
 u8 reserved;
 } __packed;

View File

@@ -499,71 +499,79 @@ enum iwl_time_event_type {
 TE_MAX
 }; /* MAC_EVENT_TYPE_API_E_VER_1 */
-/* Time Event dependencies: none, on another TE, or in a specific time */
-enum {
-TE_INDEPENDENT = 0,
-TE_DEP_OTHER = 1,
-TE_DEP_TSF = 2,
-TE_EVENT_SOCIOPATHIC = 4,
-}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
-/*
-* Supported Time event notifications configuration.
-* A notification (both event and fragment) includes a status indicating weather
-* the FW was able to schedule the event or not. For fragment start/end
-* notification the status is always success. There is no start/end fragment
-* notification for monolithic events.
-*
-* @TE_NOTIF_NONE: no notifications
-* @TE_NOTIF_HOST_EVENT_START: request/receive notification on event start
-* @TE_NOTIF_HOST_EVENT_END:request/receive notification on event end
-* @TE_NOTIF_INTERNAL_EVENT_START: internal FW use
-* @TE_NOTIF_INTERNAL_EVENT_END: internal FW use.
-* @TE_NOTIF_HOST_FRAG_START: request/receive notification on frag start
-* @TE_NOTIF_HOST_FRAG_END:request/receive notification on frag end
-* @TE_NOTIF_INTERNAL_FRAG_START: internal FW use.
-* @TE_NOTIF_INTERNAL_FRAG_END: internal FW use.
-*/
-enum {
-TE_NOTIF_NONE = 0,
-TE_NOTIF_HOST_EVENT_START = 0x1,
-TE_NOTIF_HOST_EVENT_END = 0x2,
-TE_NOTIF_INTERNAL_EVENT_START = 0x4,
-TE_NOTIF_INTERNAL_EVENT_END = 0x8,
-TE_NOTIF_HOST_FRAG_START = 0x10,
-TE_NOTIF_HOST_FRAG_END = 0x20,
-TE_NOTIF_INTERNAL_FRAG_START = 0x40,
-TE_NOTIF_INTERNAL_FRAG_END = 0x80
-}; /* MAC_EVENT_ACTION_API_E_VER_2 */
+/* Time event - defines for command API v1 */
 /*
-* @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
-* @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+* @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
+* @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
 * the first fragment is scheduled.
-* @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only
+* @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
 * the first 2 fragments are scheduled.
-* @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number
-* of fragments are valid.
+* @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+* number of fragments are valid.
 *
 * Other than the constant defined above, specifying a fragmentation value 'x'
 * means that the event can be fragmented but only the first 'x' will be
 * scheduled.
 */
 enum {
-TE_FRAG_NONE = 0,
-TE_FRAG_SINGLE = 1,
-TE_FRAG_DUAL = 2,
-TE_FRAG_ENDLESS = 0xffffffff
+TE_V1_FRAG_NONE = 0,
+TE_V1_FRAG_SINGLE = 1,
+TE_V1_FRAG_DUAL = 2,
+TE_V1_FRAG_ENDLESS = 0xffffffff
 };
-/* Repeat the time event endlessly (until removed) */
-#define TE_REPEAT_ENDLESS (0xffffffff)
-/* If a Time Event has bounded repetitions, this is the maximal value */
-#define TE_REPEAT_MAX_MSK (0x0fffffff)
 /* If a Time Event can be fragmented, this is the max number of fragments */
-#define TE_FRAG_MAX_MSK (0x0fffffff)
+#define TE_V1_FRAG_MAX_MSK 0x0fffffff
+/* Repeat the time event endlessly (until removed) */
+#define TE_V1_REPEAT_ENDLESS 0xffffffff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
+/* Time Event dependencies: none, on another TE, or in a specific time */
+enum {
+TE_V1_INDEPENDENT = 0,
+TE_V1_DEP_OTHER = BIT(0),
+TE_V1_DEP_TSF = BIT(1),
+TE_V1_EVENT_SOCIOPATHIC = BIT(2),
+}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+/*
+* @TE_V1_NOTIF_NONE: no notifications
+* @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
+* @TE_V1_NOTIF_HOST_EVENT_END:request/receive notification on event end
+* @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
+* @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
+* @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+* @TE_V1_NOTIF_HOST_FRAG_END:request/receive notification on frag end
+* @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
+* @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
+*
+* Supported Time event notifications configuration.
+* A notification (both event and fragment) includes a status indicating weather
+* the FW was able to schedule the event or not. For fragment start/end
+* notification the status is always success. There is no start/end fragment
+* notification for monolithic events.
+*/
+enum {
+TE_V1_NOTIF_NONE = 0,
+TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
+TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
+TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
+TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
+TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
+TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
+TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
+TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
+}; /* MAC_EVENT_ACTION_API_E_VER_2 */
 /**
-* struct iwl_time_event_cmd - configuring Time Events
+* struct iwl_time_event_cmd_api_v1 - configuring Time Events
+* with struct MAC_TIME_EVENT_DATA_API_S_VER_1 (see also
+* with version 2. determined by IWL_UCODE_TLV_FLAGS)
 * ( TIME_EVENT_CMD = 0x29 )
 * @id_and_color: ID and color of the relevant MAC
 * @action: action to perform, one of FW_CTXT_ACTION_*
@@ -578,12 +586,13 @@ enum {
 * @interval_reciprocal: 2^32 / interval
 * @duration: duration of event in TU
 * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
-* @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
+* @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
+* and TE_V1_EVENT_SOCIOPATHIC
 * @is_present: 0 or 1, are we present or absent during the Time Event
 * @max_frags: maximal number of fragments the Time Event can be divided to
-* @notify: notifications using TE_NOTIF_* (whom to notify when)
+* @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
 */
-struct iwl_time_event_cmd {
+struct iwl_time_event_cmd_v1 {
 /* COMMON_INDEX_HDR_API_S_VER_1 */
 __le32 id_and_color;
 __le32 action;
@@ -602,6 +611,123 @@ struct iwl_time_event_cmd {
 __le32 notify;
 } __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
+/* Time event - defines for command API v2 */
+/*
+* @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+* @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+* the first fragment is scheduled.
+* @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
+* the first 2 fragments are scheduled.
+* @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+* number of fragments are valid.
+*
+* Other than the constant defined above, specifying a fragmentation value 'x'
+* means that the event can be fragmented but only the first 'x' will be
+* scheduled.
+*/
+enum {
+TE_V2_FRAG_NONE = 0,
+TE_V2_FRAG_SINGLE = 1,
+TE_V2_FRAG_DUAL = 2,
+TE_V2_FRAG_MAX = 0xfe,
+TE_V2_FRAG_ENDLESS = 0xff
+};
+/* Repeat the time event endlessly (until removed) */
+#define TE_V2_REPEAT_ENDLESS 0xff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V2_REPEAT_MAX 0xfe
+#define TE_V2_PLACEMENT_POS 12
+#define TE_V2_ABSENCE_POS 15
+/* Time event policy values (for time event cmd api v2)
+* A notification (both event and fragment) includes a status indicating weather
+* the FW was able to schedule the event or not. For fragment start/end
+* notification the status is always success. There is no start/end fragment
+* notification for monolithic events.
+*
+* @TE_V2_DEFAULT_POLICY: independent, social, present, unoticable
+* @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+* @TE_V2_NOTIF_HOST_EVENT_END:request/receive notification on event end
+* @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+* @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+* @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+* @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
+* @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+* @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+* @TE_V2_DEP_OTHER: depends on another time event
+* @TE_V2_DEP_TSF: depends on a specific time
+* @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC
+* @TE_V2_ABSENCE: are we present or absent during the Time Event.
+*/
+enum {
+TE_V2_DEFAULT_POLICY = 0x0,
+/* notifications (event start/stop, fragment start/stop) */
+TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
+TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
+TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
+TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
+TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
+TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
+TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
+TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+TE_V2_NOTIF_MSK = 0xff,
+/* placement characteristics */
+TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
+TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
+TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
+/* are we present or absent during the Time Event. */
+TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
+};
+/**
+* struct iwl_time_event_cmd_api_v2 - configuring Time Events
+* with struct MAC_TIME_EVENT_DATA_API_S_VER_2 (see also
+* with version 1. determined by IWL_UCODE_TLV_FLAGS)
+* ( TIME_EVENT_CMD = 0x29 )
+* @id_and_color: ID and color of the relevant MAC
+* @action: action to perform, one of FW_CTXT_ACTION_*
+* @id: this field has two meanings, depending on the action:
+* If the action is ADD, then it means the type of event to add.
+* For all other actions it is the unique event ID assigned when the
+* event was added by the FW.
+* @apply_time: When to start the Time Event (in GP2)
+* @max_delay: maximum delay to event's start (apply time), in TU
+* @depends_on: the unique ID of the event we depend on (if any)
+* @interval: interval between repetitions, in TU
+* @duration: duration of event in TU
+* @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+* @max_frags: maximal number of fragments the Time Event can be divided to
+* @policy: defines whether uCode shall notify the host or other uCode modules
+* on event and/or fragment start and/or end
+* using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
+* TE_EVENT_SOCIOPATHIC
+* using TE_ABSENCE and using TE_NOTIF_*
+*/
+struct iwl_time_event_cmd_v2 {
+/* COMMON_INDEX_HDR_API_S_VER_1 */
+__le32 id_and_color;
+__le32 action;
+__le32 id;
+/* MAC_TIME_EVENT_DATA_API_S_VER_2 */
+__le32 apply_time;
+__le32 max_delay;
+__le32 depends_on;
+__le32 interval;
+__le32 duration;
+u8 repeat;
+u8 max_frags;
+__le16 policy;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
 /**
 * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
 * @status: bit 0 indicates success, all others specify errors
@@ -1195,7 +1321,7 @@ struct mvm_statistics_general {
 struct mvm_statistics_general_common common;
 __le32 beacon_filtered;
 __le32 missed_beacons;
-__s8 beacon_filter_everage_energy;
+__s8 beacon_filter_average_energy;
 __s8 beacon_filter_reason;
 __s8 beacon_filter_current_energy;
 __s8 beacon_filter_reserved;
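
The v2 time event API folds the old dep_policy, is_present and notify fields into a single policy word: notification bits occupy the low byte (TE_V2_NOTIF_MSK) and the placement/absence bits start at TE_V2_PLACEMENT_POS and TE_V2_ABSENCE_POS, while the firmware image advertises which API it speaks through IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2. A hedged sketch of composing such a policy value; the constants are copied from the hunk above, while the helper function is purely illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    #define TE_V2_PLACEMENT_POS 12
    #define TE_V2_ABSENCE_POS 15

    #define TE_V2_NOTIF_HOST_EVENT_START BIT(0)
    #define TE_V2_NOTIF_HOST_EVENT_END   BIT(1)
    #define TE_V2_NOTIF_MSK              0xff
    #define TE_V2_DEP_OTHER              BIT(TE_V2_PLACEMENT_POS)
    #define TE_V2_DEP_TSF                BIT(TE_V2_PLACEMENT_POS + 1)
    #define TE_V2_EVENT_SOCIOPATHIC      BIT(TE_V2_PLACEMENT_POS + 2)
    #define TE_V2_ABSENCE                BIT(TE_V2_ABSENCE_POS)

    /* Build a v2 policy word from notification bits plus placement flags. */
    static uint16_t te_v2_policy(uint16_t notif, uint16_t placement, int absent)
    {
            uint16_t policy = notif & TE_V2_NOTIF_MSK;

            policy |= placement;
            if (absent)
                    policy |= TE_V2_ABSENCE;
            return policy;
    }

    int main(void)
    {
            uint16_t policy = te_v2_policy(TE_V2_NOTIF_HOST_EVENT_START |
                                           TE_V2_NOTIF_HOST_EVENT_END,
                                           TE_V2_DEP_TSF, 0);

            printf("policy = 0x%04x\n", (unsigned)policy);
            return 0;
    }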

View File

@@ -155,7 +155,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 IEEE80211_HW_TIMING_BEACON_ONLY |
 IEEE80211_HW_CONNECTION_MONITOR |
 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
-IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+IEEE80211_HW_SUPPORTS_UAPSD;
 hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -190,6 +191,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 hw->wiphy->max_remain_on_channel_duration = 10000;
 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
 /* Extract MAC address */
 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -577,7 +580,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
 mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
 mvm->bf_allowed_vif = mvmvif;
-vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+IEEE80211_VIF_SUPPORTS_CQM_RSSI;
 }
 /*
@@ -617,7 +621,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
 out_free_bf:
 if (mvm->bf_allowed_vif == mvmvif) {
 mvm->bf_allowed_vif = NULL;
-vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+IEEE80211_VIF_SUPPORTS_CQM_RSSI);
 }
 out_remove_mac:
 mvmvif->phy_ctxt = NULL;
@@ -683,7 +688,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
 if (mvm->bf_allowed_vif == mvmvif) {
 mvm->bf_allowed_vif = NULL;
-vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+IEEE80211_VIF_SUPPORTS_CQM_RSSI);
 }
 iwl_mvm_vif_dbgfs_clean(mvm, vif);
@@ -801,6 +807,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 if (ret)
 IWL_ERR(mvm, "failed to update quotas\n");
 }
+/* reset rssi values */
+mvmvif->bf_data.ave_beacon_signal = 0;
 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
 /* Workaround for FW bug, otherwise FW disables device
 * power save upon disassociation
@@ -817,7 +827,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 */
 iwl_mvm_remove_time_event(mvm, mvmvif,
 &mvmvif->time_event_data);
-} else if (changes & BSS_CHANGED_PS) {
+} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
 ret = iwl_mvm_power_update_mode(mvm, vif);
 if (ret)
 IWL_ERR(mvm, "failed to update power mode\n");
@@ -827,6 +837,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
 bss_conf->txpower);
 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
 }
+if (changes & BSS_CHANGED_CQM) {
+IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
+/* reset cqm events tracking */
+mvmvif->bf_data.last_cqm_event = 0;
+ret = iwl_mvm_update_beacon_filter(mvm, vif);
+if (ret)
+IWL_ERR(mvm, "failed to update CQM thresholds\n");
+}
 }
 static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)

View File

@@ -153,6 +153,11 @@ enum iwl_power_scheme {
 };
 #define IWL_CONN_MAX_LISTEN_INTERVAL 70
+#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
 struct iwl_mvm_power_ops {
 int (*power_update_mode)(struct iwl_mvm *mvm,
@@ -175,6 +180,7 @@ enum iwl_dbgfs_pm_mask {
 MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
 MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
 MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
+MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
 };
 struct iwl_dbgfs_pm {
@@ -186,6 +192,7 @@ struct iwl_dbgfs_pm {
 bool disable_power_off;
 bool lprx_ena;
 u32 lprx_rssi_threshold;
+bool snooze_ena;
 int mask;
 };
@@ -227,6 +234,21 @@ enum iwl_mvm_smps_type_request {
 NUM_IWL_MVM_SMPS_REQ,
 };
+/**
+* struct iwl_mvm_vif_bf_data - beacon filtering related data
+* @bf_enabled: indicates if beacon filtering is enabled
+* @ba_enabled: indicated if beacon abort is enabled
+* @last_beacon_signal: last beacon rssi signal in dbm
+* @ave_beacon_signal: average beacon signal
+* @last_cqm_event: rssi of the last cqm event
+*/
+struct iwl_mvm_vif_bf_data {
+bool bf_enabled;
+bool ba_enabled;
+s8 ave_beacon_signal;
+s8 last_cqm_event;
+};
 /**
 * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
 * @id: between 0 and 3
@@ -252,8 +274,7 @@ struct iwl_mvm_vif {
 bool uploaded;
 bool ap_active;
 bool monitor_active;
-/* indicate whether beacon filtering is enabled */
-bool bf_enabled;
+struct iwl_mvm_vif_bf_data bf_data;
 u32 ap_beacon_time;
@@ -754,6 +775,8 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
 struct iwl_beacon_filter_cmd *cmd);
 int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
 struct ieee80211_vif *vif, bool enable);
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+struct ieee80211_vif *vif);
 /* SMPS */
 void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
View File

@@ -110,6 +110,23 @@ int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
 return ret;
 }
+static
+void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
+struct ieee80211_vif *vif,
+struct iwl_beacon_filter_cmd *cmd)
+{
+struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+if (vif->bss_conf.cqm_rssi_thold) {
+cmd->bf_energy_delta =
+cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
+/* fw uses an absolute value for this */
+cmd->bf_roaming_state =
+cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
+}
+cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+}
 int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
 struct ieee80211_vif *vif, bool enable)
 {
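
iwl_mvm_beacon_filter_set_cqm_params() maps mac80211's CQM settings onto the beacon filter command: the RSSI hysteresis becomes bf_energy_delta, and the (normally negative, dBm) threshold is negated because, as the comment notes, the firmware expects an absolute value. A simplified stand-alone sketch of that conversion, with plain integer fields and no endianness handling:

    #include <stdint.h>
    #include <stdio.h>

    struct bf_cmd {
            uint32_t bf_energy_delta;  /* RSSI hysteresis, in dB */
            uint32_t bf_roaming_state; /* |threshold|, firmware wants it positive */
    };

    /* cqm_rssi_thold is a (usually negative) dBm value, cqm_rssi_hyst is in dB. */
    static void set_cqm_params(struct bf_cmd *cmd, int cqm_rssi_thold,
                               unsigned int cqm_rssi_hyst)
    {
            if (cqm_rssi_thold) {
                    cmd->bf_energy_delta = cqm_rssi_hyst;
                    /* fw uses an absolute value for this */
                    cmd->bf_roaming_state = (uint32_t)(-cqm_rssi_thold);
            }
    }

    int main(void)
    {
            struct bf_cmd cmd = {0};

            set_cqm_params(&cmd, -70, 3); /* e.g. -70 dBm threshold, 3 dB hysteresis */
            printf("roaming_state=%u energy_delta=%u\n",
                   (unsigned)cmd.bf_roaming_state, (unsigned)cmd.bf_energy_delta);
            return 0;
    }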
@@ -120,12 +137,14 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
 .ba_enable_beacon_abort = cpu_to_le32(enable),
 };
-if (!mvmvif->bf_enabled)
+if (!mvmvif->bf_data.bf_enabled)
 return 0;
 if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
 cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+mvmvif->bf_data.ba_enabled = enable;
+iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
 return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
 }
@@ -140,17 +159,30 @@ static void iwl_mvm_power_log(struct iwl_mvm *mvm,
 IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
 le16_to_cpu(cmd->keep_alive_seconds));
-if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
-IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
-le32_to_cpu(cmd->rx_data_timeout));
-IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
-le32_to_cpu(cmd->tx_data_timeout));
-if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
-IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
-cmd->skip_dtim_periods);
-if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
-IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
-cmd->lprx_rssi_threshold);
+if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+IWL_DEBUG_POWER(mvm, "Disable power management\n");
+return;
+}
+IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+le32_to_cpu(cmd->rx_data_timeout));
+IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+le32_to_cpu(cmd->tx_data_timeout));
+if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+cmd->skip_dtim_periods);
+if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+cmd->lprx_rssi_threshold);
+if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
+IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+le32_to_cpu(cmd->rx_data_timeout_uapsd));
+IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
+le32_to_cpu(cmd->tx_data_timeout_uapsd));
+IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
 }
 }
@@ -166,6 +198,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 bool radar_detect = false;
 struct iwl_mvm_vif *mvmvif __maybe_unused =
 iwl_mvm_vif_from_mac80211(vif);
+enum ieee80211_ac_numbers ac;
+bool tid_found = false;
 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
 mvmvif->color));
@@ -235,6 +269,63 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
 }
+for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+if (!mvmvif->queue_params[ac].uapsd)
+continue;
+cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+cmd->uapsd_ac_flags |= BIT(ac);
+/* QNDP TID - the highest TID with no admission control */
+if (!tid_found && !mvmvif->queue_params[ac].acm) {
+tid_found = true;
+switch (ac) {
+case IEEE80211_AC_VO:
+cmd->qndp_tid = 6;
+break;
+case IEEE80211_AC_VI:
+cmd->qndp_tid = 5;
+break;
+case IEEE80211_AC_BE:
+cmd->qndp_tid = 0;
+break;
+case IEEE80211_AC_BK:
+cmd->qndp_tid = 1;
+break;
+}
+}
+}
+if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+cmd->rx_data_timeout_uapsd =
+cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+cmd->tx_data_timeout_uapsd =
+cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+BIT(IEEE80211_AC_VI) |
+BIT(IEEE80211_AC_BE) |
+BIT(IEEE80211_AC_BK))) {
+cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+cmd->snooze_interval =
+cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+cmd->snooze_window =
+(mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
+cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+}
+cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+cmd->heavy_tx_thld_packets =
+IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+cmd->heavy_rx_thld_packets =
+IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+cmd->heavy_tx_thld_percentage =
+IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+cmd->heavy_rx_thld_percentage =
+IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+}
 #ifdef CONFIG_IWLWIFI_DEBUGFS
 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
 cmd->keep_alive_seconds =
@@ -263,6 +354,14 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
 }
 if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
 cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
+if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
+if (mvmvif->dbgfs_pm.snooze_ena)
+cmd->flags |=
+cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+else
+cmd->flags &=
+cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
+}
 #endif /* CONFIG_IWLWIFI_DEBUGFS */
 }
@@ -342,8 +441,6 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
 (cmd.flags &
 cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
 0 : 1);
-pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
-cmd.skip_dtim_periods);
 pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
 iwlmvm_mod_params.power_scheme);
 pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
@@ -356,14 +453,64 @@ static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
 (cmd.flags &
 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
 1 : 0);
-pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
-le32_to_cpu(cmd.rx_data_timeout));
-pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
-le32_to_cpu(cmd.tx_data_timeout));
+pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+cmd.skip_dtim_periods);
+if (!(cmd.flags &
+cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+pos += scnprintf(buf+pos, bufsz-pos,
+"rx_data_timeout = %d\n",
+le32_to_cpu(cmd.rx_data_timeout));
+pos += scnprintf(buf+pos, bufsz-pos,
+"tx_data_timeout = %d\n",
+le32_to_cpu(cmd.tx_data_timeout));
+}
 if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
 pos += scnprintf(buf+pos, bufsz-pos,
 "lprx_rssi_threshold = %d\n",
 cmd.lprx_rssi_threshold);
+if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+pos +=
+scnprintf(buf+pos, bufsz-pos,
+"rx_data_timeout_uapsd = %d\n",
+le32_to_cpu(cmd.rx_data_timeout_uapsd));
+pos +=
+scnprintf(buf+pos, bufsz-pos,
+"tx_data_timeout_uapsd = %d\n",
+le32_to_cpu(cmd.tx_data_timeout_uapsd));
+pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
+cmd.qndp_tid);
+pos += scnprintf(buf+pos, bufsz-pos,
+"uapsd_ac_flags = 0x%x\n",
+cmd.uapsd_ac_flags);
+pos += scnprintf(buf+pos, bufsz-pos,
+"uapsd_max_sp = %d\n",
+cmd.uapsd_max_sp);
+pos += scnprintf(buf+pos, bufsz-pos,
+"heavy_tx_thld_packets = %d\n",
+cmd.heavy_tx_thld_packets);
+pos += scnprintf(buf+pos, bufsz-pos,
+"heavy_rx_thld_packets = %d\n",
+cmd.heavy_rx_thld_packets);
+pos += scnprintf(buf+pos, bufsz-pos,
+"heavy_tx_thld_percentage = %d\n",
+cmd.heavy_tx_thld_percentage);
+pos += scnprintf(buf+pos, bufsz-pos,
+"heavy_rx_thld_percentage = %d\n",
+cmd.heavy_rx_thld_percentage);
+pos +=
+scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
+(cmd.flags &
+cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
+1 : 0);
+}
+if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+pos += scnprintf(buf+pos, bufsz-pos,
+"snooze_interval = %d\n",
+cmd.snooze_interval);
+pos += scnprintf(buf+pos, bufsz-pos,
+"snooze_window = %d\n",
+cmd.snooze_window);
+}
 }
 return pos;
 }
@@ -417,11 +564,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
 vif->type != NL80211_IFTYPE_STATION || vif->p2p)
 return 0;
+iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
 iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
 if (!ret)
-mvmvif->bf_enabled = true;
+mvmvif->bf_data.bf_enabled = true;
 return ret;
 }
@@ -440,11 +588,22 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
 if (!ret)
-mvmvif->bf_enabled = false;
+mvmvif->bf_data.bf_enabled = false;
 return ret;
 }
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+struct ieee80211_vif *vif)
+{
+struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+if (!mvmvif->bf_data.bf_enabled)
+return 0;
+return iwl_mvm_enable_beacon_filter(mvm, vif);
+}
 const struct iwl_mvm_power_ops pm_mac_ops = {
 .power_update_mode = iwl_mvm_power_mac_update_mode,
 .power_disable = iwl_mvm_power_mac_disable,

View File

@@ -82,41 +82,35 @@ static const u8 ant_toggle_lookup[] = {
 [ANT_ABC] = ANT_ABC,
 };
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
 IWL_RATE_SISO_##s##M_PLCP, \
 IWL_RATE_MIMO2_##s##M_PLCP,\
-IWL_RATE_MIMO3_##s##M_PLCP,\
-IWL_RATE_##r##M_IEEE, \
-IWL_RATE_##ip##M_INDEX, \
-IWL_RATE_##in##M_INDEX, \
 IWL_RATE_##rp##M_INDEX, \
-IWL_RATE_##rn##M_INDEX, \
-IWL_RATE_##pp##M_INDEX, \
-IWL_RATE_##np##M_INDEX }
+IWL_RATE_##rn##M_INDEX }
 /*
 * Parameter order:
-* rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+* rate, ht rate, prev rate, next rate
 *
 * If there isn't a valid next or previous rate then INV is used which
 * maps to IWL_RATE_INVALID
 *
 */
 static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
-IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
-IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
-IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
-IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
-IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
-IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
-IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
-IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
-IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
-IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
-IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
-IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */
+IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
+IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */
+IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
+IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */
+IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */
+IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */
+IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */
+IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */
+IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */
+IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */
+IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */
+IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */
 /* FIXME:RS: ^^ should be INV (legacy) */
 };
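
With the MIMO3, IEEE-rate and legacy-lookup columns dropped, each IWL_DECLARE_RATE_INFO() entry now carries only the PLCP codes plus the previous/next rate indices used when walking the rate table. A small self-contained sketch of what one trimmed entry expands to (the struct, names and neighbour values below are illustrative, not the driver's real table):

    #include <stdio.h>

    /* Simplified model of the trimmed-down rate table entry. */
    struct rate_info {
            int plcp;       /* legacy PLCP code */
            int plcp_siso;  /* HT SISO PLCP code */
            int plcp_mimo2; /* HT MIMO2 PLCP code */
            int prev_rs;    /* index of the next-slower rate */
            int next_rs;    /* index of the next-faster rate */
    };

    enum { RATE_6M_INDEX = 0, RATE_9M_INDEX, RATE_12M_INDEX, RATE_COUNT };

    /* Mirrors the shape of IWL_DECLARE_RATE_INFO(r, s, rp, rn) after the cleanup. */
    #define DECLARE_RATE_INFO(r, s, rp, rn) \
            [RATE_##r##M_INDEX] = { r, s, s, RATE_##rp##M_INDEX, RATE_##rn##M_INDEX }

    static const struct rate_info rates[RATE_COUNT] = {
            DECLARE_RATE_INFO(6, 6, 6, 9),    /* slowest: prev points at itself */
            DECLARE_RATE_INFO(9, 6, 6, 12),
            DECLARE_RATE_INFO(12, 12, 9, 12), /* fastest: next points at itself */
    };

    int main(void)
    {
            const struct rate_info *r = &rates[RATE_9M_INDEX];

            printf("9M: prev index %d, next index %d\n", r->prev_rs, r->next_rs);
            return 0;
    }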
@@ -134,9 +128,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
 if (rate_n_flags & RATE_MCS_HT_MSK) {
 idx = rs_extract_rate(rate_n_flags);
-if (idx >= IWL_RATE_MIMO3_6M_PLCP)
-idx = idx - IWL_RATE_MIMO3_6M_PLCP;
-else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
+if (idx >= IWL_RATE_MIMO2_6M_PLCP)
 idx = idx - IWL_RATE_MIMO2_6M_PLCP;
 idx += IWL_FIRST_OFDM_RATE;
@@ -168,10 +161,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
 #ifdef CONFIG_MAC80211_DEBUGFS
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-u32 *rate_n_flags, int index);
+u32 *rate_n_flags);
 #else
 static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
-u32 *rate_n_flags, int index)
+u32 *rate_n_flags)
 {}
 #endif
@@ -218,20 +211,6 @@ static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
 {0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
 };
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
-{0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
-{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
-{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
-{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
-};
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
-{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
-{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
-{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
-{0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
-};
 /* mbps, mcs */
 static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
 { "1", "BPSK DSSS"},
@@ -279,7 +258,6 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
 lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
 lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
 lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
 IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
 lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
@@ -459,7 +437,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
 else if (is_mimo2(tbl->lq_type))
 rate_n_flags |= iwl_rates[index].plcp_mimo2;
 else
-rate_n_flags |= iwl_rates[index].plcp_mimo3;
+WARN_ON_ONCE(1);
 } else {
 IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
 }
@@ -497,7 +475,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
 u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
 u8 mcs;
-memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
 *rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
 if (*rate_idx == IWL_RATE_INVALID) {
@@ -536,12 +514,8 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
 } else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
 if (num_of_ant == 2)
 tbl->lq_type = LQ_MIMO2;
-/* MIMO3 */
 } else {
-if (num_of_ant == 3) {
-tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
-tbl->lq_type = LQ_MIMO3;
-}
+WARN_ON_ONCE(num_of_ant == 3);
 }
 }
 return 0;
@@ -607,10 +581,10 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
 } else {
 if (is_siso(rate_type))
 return lq_sta->active_siso_rate;
-else if (is_mimo2(rate_type))
+else {
+WARN_ON_ONCE(!is_mimo2(rate_type));
 return lq_sta->active_mimo2_rate;
-else
-return lq_sta->active_mimo3_rate;
+}
 }
 }
@@ -985,7 +959,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 }
 /* Choose among many HT tables depending on number of streams
-* (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+* (SISO/MIMO2), channel width (20/40), SGI, and aggregation
 * status */
 if (is_siso(tbl->lq_type) && !tbl->is_ht40)
 ht_tbl_pointer = expected_tpt_siso20MHz;
@@ -993,12 +967,10 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
 ht_tbl_pointer = expected_tpt_siso40MHz;
 else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
-else if (is_mimo2(tbl->lq_type))
+else {
+WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
-ht_tbl_pointer = expected_tpt_mimo3_20MHz;
-else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
-ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+}
 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
 tbl->expected_tpt = ht_tbl_pointer[0];
@@ -1169,58 +1141,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
 return 0;
 }
-/*
-* Set up search table for MIMO3
-*/
-static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
-struct iwl_lq_sta *lq_sta,
-struct ieee80211_sta *sta,
-struct iwl_scale_tbl_info *tbl, int index)
-{
-u16 rate_mask;
-s32 rate;
-s8 is_green = lq_sta->is_green;
-if (!sta->ht_cap.ht_supported)
-return -1;
-if (sta->smps_mode == IEEE80211_SMPS_STATIC)
-return -1;
-/* Need both Tx chains/antennas to support MIMO */
-if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 3)
-return -1;
-IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
-tbl->lq_type = LQ_MIMO3;
-tbl->action = 0;
-tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
-rate_mask = lq_sta->active_mimo3_rate;
-if (iwl_is_ht40_tx_allowed(sta))
-tbl->is_ht40 = 1;
-else
-tbl->is_ht40 = 0;
-rs_set_expected_tpt_table(lq_sta, tbl);
-rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
-rate, rate_mask);
-if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
-IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
-rate, rate_mask);
-return -1;
-}
-tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
-IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
-tbl->current_rate, is_green);
-return 0;
-}
 /*
 * Set up search table for SISO
 */
@@ -1330,21 +1250,14 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
 }
 break;
-case IWL_LEGACY_SWITCH_MIMO2_AB:
-case IWL_LEGACY_SWITCH_MIMO2_AC:
-case IWL_LEGACY_SWITCH_MIMO2_BC:
+case IWL_LEGACY_SWITCH_MIMO2:
 IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
 /* Set up search table to try MIMO */
 memcpy(search_tbl, tbl, sz);
 search_tbl->is_SGI = 0;
-if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
-search_tbl->ant_type = ANT_AB;
-else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
-search_tbl->ant_type = ANT_AC;
-else
-search_tbl->ant_type = ANT_BC;
+search_tbl->ant_type = ANT_AB;
 if (!rs_is_valid_ant(valid_tx_ant,
 search_tbl->ant_type))
@@ -1357,30 +1270,11 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
 goto out;
 }
 break;
-case IWL_LEGACY_SWITCH_MIMO3_ABC:
-IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
-/* Set up search table to try MIMO3 */
-memcpy(search_tbl, tbl, sz);
-search_tbl->is_SGI = 0;
-search_tbl->ant_type = ANT_ABC;
-if (!rs_is_valid_ant(valid_tx_ant,
-search_tbl->ant_type))
-break;
-ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
-search_tbl, index);
-if (!ret) {
-lq_sta->action_counter = 0;
-goto out;
-}
-break;
+default:
+WARN_ON_ONCE(1);
 }
 tbl->action++;
-if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 if (tbl->action == start_action)
@@ -1392,7 +1286,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
 out:
 lq_sta->search_better_tbl = 1;
 tbl->action++;
-if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
 tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
 if (update_search_tbl_counter)
 search_tbl->action = tbl->action;
@@ -1427,7 +1321,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
 /* avoid antenna B unless MIMO */
 if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
-tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
+tbl->action = IWL_SISO_SWITCH_MIMO2;
 break;
 case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1469,19 +1363,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
 goto out;
 }
 break;
-case IWL_SISO_SWITCH_MIMO2_AB:
-case IWL_SISO_SWITCH_MIMO2_AC:
-case IWL_SISO_SWITCH_MIMO2_BC:
+case IWL_SISO_SWITCH_MIMO2:
 IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
 memcpy(search_tbl, tbl, sz);
 search_tbl->is_SGI = 0;
-if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
-search_tbl->ant_type = ANT_AB;
-else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
-search_tbl->ant_type = ANT_AC;
-else
-search_tbl->ant_type = ANT_BC;
+search_tbl->ant_type = ANT_AB;
 if (!rs_is_valid_ant(valid_tx_ant,
 search_tbl->ant_type))
@@ -1522,24 +1409,11 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
 index, is_green);
 update_search_tbl_counter = 1;
goto out; goto out;
case IWL_SISO_SWITCH_MIMO3_ABC: default:
IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n"); WARN_ON_ONCE(1);
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
search_tbl->ant_type = ANT_ABC;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
search_tbl, index);
if (!ret)
goto out;
break;
} }
tbl->action++; tbl->action++;
if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC) if (tbl->action > IWL_SISO_SWITCH_GI)
tbl->action = IWL_SISO_SWITCH_ANTENNA1; tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (tbl->action == start_action) if (tbl->action == start_action)
@ -1551,7 +1425,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
out: out:
lq_sta->search_better_tbl = 1; lq_sta->search_better_tbl = 1;
tbl->action++; tbl->action++;
if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC) if (tbl->action > IWL_SISO_SWITCH_GI)
tbl->action = IWL_SISO_SWITCH_ANTENNA1; tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (update_search_tbl_counter) if (update_search_tbl_counter)
search_tbl->action = tbl->action; search_tbl->action = tbl->action;
@ -1592,8 +1466,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break; break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW: case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */ /* avoid antenna B unless MIMO */
if (tbl->action == IWL_MIMO2_SWITCH_SISO_B || if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
tbl->action == IWL_MIMO2_SWITCH_SISO_C)
tbl->action = IWL_MIMO2_SWITCH_SISO_A; tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break; break;
default: default:
@ -1626,7 +1499,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break; break;
case IWL_MIMO2_SWITCH_SISO_A: case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B: case IWL_MIMO2_SWITCH_SISO_B:
case IWL_MIMO2_SWITCH_SISO_C:
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n"); IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
/* Set up new search table for SISO */ /* Set up new search table for SISO */
@ -1634,10 +1506,8 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
if (tbl->action == IWL_MIMO2_SWITCH_SISO_A) if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
search_tbl->ant_type = ANT_A; search_tbl->ant_type = ANT_A;
else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B) else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
search_tbl->ant_type = ANT_B; search_tbl->ant_type = ANT_B;
else
search_tbl->ant_type = ANT_C;
if (!rs_is_valid_ant(valid_tx_ant, if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type)) search_tbl->ant_type))
@ -1680,26 +1550,11 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
index, is_green); index, is_green);
update_search_tbl_counter = 1; update_search_tbl_counter = 1;
goto out; goto out;
default:
case IWL_MIMO2_SWITCH_MIMO3_ABC: WARN_ON_ONCE(1);
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
search_tbl->ant_type = ANT_ABC;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
search_tbl, index);
if (!ret)
goto out;
break;
} }
tbl->action++; tbl->action++;
if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) if (tbl->action > IWL_MIMO2_SWITCH_GI)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (tbl->action == start_action) if (tbl->action == start_action)
@ -1710,7 +1565,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
out: out:
lq_sta->search_better_tbl = 1; lq_sta->search_better_tbl = 1;
tbl->action++; tbl->action++;
if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC) if (tbl->action > IWL_MIMO2_SWITCH_GI)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1; tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (update_search_tbl_counter) if (update_search_tbl_counter)
search_tbl->action = tbl->action; search_tbl->action = tbl->action;
@ -1718,171 +1573,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
return 0; return 0;
} }
/*
* Try to switch to new modulation mode from MIMO3
*/
static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct ieee80211_sta *sta, int index)
{
s8 is_green = lq_sta->is_green;
struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
struct iwl_scale_tbl_info *search_tbl =
&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
struct iwl_rate_scale_data *window = &(tbl->win[index]);
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
u8 tx_chains_num = num_of_ant(valid_tx_ant);
int ret;
u8 update_search_tbl_counter = 0;
switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
/* nothing */
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
tbl->action == IWL_MIMO3_SWITCH_SISO_C)
tbl->action = IWL_MIMO3_SWITCH_SISO_A;
break;
default:
IWL_ERR(mvm, "Invalid BT load %d",
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
break;
}
start_action = tbl->action;
while (1) {
lq_sta->action_counter++;
switch (tbl->action) {
case IWL_MIMO3_SWITCH_ANTENNA1:
case IWL_MIMO3_SWITCH_ANTENNA2:
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
if (tx_chains_num <= 3)
break;
if (window->success_ratio >= IWL_RS_GOOD_RATIO)
break;
memcpy(search_tbl, tbl, sz);
if (rs_toggle_antenna(valid_tx_ant,
&search_tbl->current_rate,
search_tbl))
goto out;
break;
case IWL_MIMO3_SWITCH_SISO_A:
case IWL_MIMO3_SWITCH_SISO_B:
case IWL_MIMO3_SWITCH_SISO_C:
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
/* Set up new search table for SISO */
memcpy(search_tbl, tbl, sz);
if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
search_tbl->ant_type = ANT_A;
else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
search_tbl->ant_type = ANT_B;
else
search_tbl->ant_type = ANT_C;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_siso(mvm, lq_sta, sta,
search_tbl, index);
if (!ret)
goto out;
break;
case IWL_MIMO3_SWITCH_MIMO2_AB:
case IWL_MIMO3_SWITCH_MIMO2_AC:
case IWL_MIMO3_SWITCH_MIMO2_BC:
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
search_tbl->ant_type = ANT_AB;
else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
search_tbl->ant_type = ANT_AC;
else
search_tbl->ant_type = ANT_BC;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
break;
ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
search_tbl, index);
if (!ret)
goto out;
break;
case IWL_MIMO3_SWITCH_GI:
if (!tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_20))
break;
if (tbl->is_ht40 && !(ht_cap->cap &
IEEE80211_HT_CAP_SGI_40))
break;
IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
/* Set up new search table for MIMO */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = !tbl->is_SGI;
rs_set_expected_tpt_table(lq_sta, search_tbl);
/*
* If active table already uses the fastest possible
* modulation (dual stream with short guard interval),
* and it's working well, there's no need to look
* for a better type of modulation!
*/
if (tbl->is_SGI) {
s32 tpt = lq_sta->last_tpt / 100;
if (tpt >= search_tbl->expected_tpt[index])
break;
}
search_tbl->current_rate =
rate_n_flags_from_tbl(mvm, search_tbl,
index, is_green);
update_search_tbl_counter = 1;
goto out;
}
tbl->action++;
if (tbl->action > IWL_MIMO3_SWITCH_GI)
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (tbl->action == start_action)
break;
}
search_tbl->lq_type = LQ_NONE;
return 0;
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
if (tbl->action > IWL_MIMO3_SWITCH_GI)
tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
return 0;
}
/* /*
* Check whether we should continue using same modulation mode, or * Check whether we should continue using same modulation mode, or
* begin search for a new mode, based on: * begin search for a new mode, based on:
@ -2289,8 +1979,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
scale_action = 0; scale_action = 0;
if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
if (lq_sta->last_bt_traffic > if (lq_sta->last_bt_traffic >
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) { BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
/* /*
@ -2307,8 +1996,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD); BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >= if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
/* search for a new modulation */ /* search for a new modulation */
rs_stay_in_table(lq_sta, true); rs_stay_in_table(lq_sta, true);
goto lq_update; goto lq_update;
@ -2368,7 +2056,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
else if (is_mimo2(tbl->lq_type)) else if (is_mimo2(tbl->lq_type))
rs_move_mimo2_to_other(mvm, lq_sta, sta, index); rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
else else
rs_move_mimo3_to_other(mvm, lq_sta, sta, index); WARN_ON_ONCE(1);
/* If new "search" mode was selected, set up in uCode table */ /* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) { if (lq_sta->search_better_tbl) {
@ -2533,11 +2221,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
rate_idx -= IWL_FIRST_OFDM_RATE; rate_idx -= IWL_FIRST_OFDM_RATE;
/* 6M and 9M shared same MCS index */ /* 6M and 9M shared same MCS index */
rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0; rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
IWL_RATE_MIMO3_6M_PLCP);
if (rs_extract_rate(lq_sta->last_rate_n_flags) >= if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
IWL_RATE_MIMO3_6M_PLCP) IWL_RATE_MIMO2_6M_PLCP)
rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
IWL_RATE_MIMO2_6M_PLCP)
rate_idx = rate_idx + MCS_INDEX_PER_STREAM; rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
info->control.rates[0].flags = IEEE80211_TX_RC_MCS; info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK) if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
@ -2636,16 +2323,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->active_mimo2_rate &= ~((u16)0x2); lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE; lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
lq_sta->active_mimo3_rate &= ~((u16)0x2);
lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
IWL_DEBUG_RATE(mvm, IWL_DEBUG_RATE(mvm,
"SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n", "SISO-RATE=%X MIMO2-RATE=%X\n",
lq_sta->active_siso_rate, lq_sta->active_siso_rate,
lq_sta->active_mimo2_rate, lq_sta->active_mimo2_rate);
lq_sta->active_mimo3_rate);
/* These values will be overridden later */ /* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk = lq_sta->lq.single_stream_ant_msk =
@ -2689,7 +2370,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
/* Override starting rate (index 0) if needed for debug purposes */ /* Override starting rate (index 0) if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index); rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Interpret new_rate (rate_n_flags) */ /* Interpret new_rate (rate_n_flags) */
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@ -2736,7 +2417,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
} }
/* Override next rate if needed for debug purposes */ /* Override next rate if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index); rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Fill next table entry */ /* Fill next table entry */
lq_cmd->rs_table[index] = lq_cmd->rs_table[index] =
@ -2778,7 +2459,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
use_ht_possible = 0; use_ht_possible = 0;
/* Override next rate if needed for debug purposes */ /* Override next rate if needed for debug purposes */
rs_dbgfs_set_mcs(lq_sta, &new_rate, index); rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Fill next table entry */ /* Fill next table entry */
lq_cmd->rs_table[index] = cpu_to_le32(new_rate); lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
@ -2823,7 +2504,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS #ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u32 *rate_n_flags, int index) u32 *rate_n_flags)
{ {
struct iwl_mvm *mvm; struct iwl_mvm *mvm;
u8 valid_tx_ant; u8 valid_tx_ant;
@ -2908,8 +2589,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(is_legacy(tbl->lq_type)) ? "legacy" : "HT"); (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) { if (is_Ht(tbl->lq_type)) {
desc += sprintf(buff+desc, " %s", desc += sprintf(buff+desc, " %s",
(is_siso(tbl->lq_type)) ? "SISO" : (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
desc += sprintf(buff+desc, " %s", desc += sprintf(buff+desc, " %s",
(tbl->is_ht40) ? "40MHz" : "20MHz"); (tbl->is_ht40) ? "40MHz" : "20MHz");
desc += sprintf(buff+desc, " %s %s %s\n", desc += sprintf(buff+desc, " %s %s %s\n",
@ -3009,32 +2689,6 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.llseek = default_llseek, .llseek = default_llseek,
}; };
static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
struct iwl_lq_sta *lq_sta = file->private_data;
struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
char buff[120];
int desc = 0;
if (is_Ht(tbl->lq_type))
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
tbl->expected_tpt[lq_sta->last_txrate_idx]);
else
desc += sprintf(buff+desc,
"Bit Rate= %d Mb/s\n",
iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
}
static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
.read = rs_sta_dbgfs_rate_scale_data_read,
.open = simple_open,
.llseek = default_llseek,
};
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir) static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
{ {
struct iwl_lq_sta *lq_sta = mvm_sta; struct iwl_lq_sta *lq_sta = mvm_sta;
@ -3044,9 +2698,6 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
lq_sta->rs_sta_dbgfs_stats_table_file = lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", S_IRUSR, dir, debugfs_create_file("rate_stats_table", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops); lq_sta, &rs_sta_dbgfs_stats_table_ops);
lq_sta->rs_sta_dbgfs_rate_scale_data_file =
debugfs_create_file("rate_scale_data", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file = lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir, debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
&lq_sta->tx_agg_tid_en); &lq_sta->tx_agg_tid_en);
@ -3057,7 +2708,6 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
struct iwl_lq_sta *lq_sta = mvm_sta; struct iwl_lq_sta *lq_sta = mvm_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file); debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
} }
#endif #endif


@ -38,14 +38,8 @@ struct iwl_rs_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */ u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */ u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
u8 prev_ieee; /* previous rate in IEEE speeds */
u8 next_ieee; /* next rate in IEEE speeds */
u8 prev_rs; /* previous rate used in rs algo */ u8 prev_rs; /* previous rate used in rs algo */
u8 next_rs; /* next rate used in rs algo */ u8 next_rs; /* next rate used in rs algo */
u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
u8 next_rs_tgg; /* next rate used in TGG rs algo */
}; };
#define IWL_RATE_60M_PLCP 3 #define IWL_RATE_60M_PLCP 3
@ -120,23 +114,6 @@ enum {
IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP, IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
}; };
/* MAC header values for bit rates */
enum {
IWL_RATE_6M_IEEE = 12,
IWL_RATE_9M_IEEE = 18,
IWL_RATE_12M_IEEE = 24,
IWL_RATE_18M_IEEE = 36,
IWL_RATE_24M_IEEE = 48,
IWL_RATE_36M_IEEE = 72,
IWL_RATE_48M_IEEE = 96,
IWL_RATE_54M_IEEE = 108,
IWL_RATE_60M_IEEE = 120,
IWL_RATE_1M_IEEE = 2,
IWL_RATE_2M_IEEE = 4,
IWL_RATE_5M_IEEE = 11,
IWL_RATE_11M_IEEE = 22,
};
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) #define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
#define IWL_INVALID_VALUE -1 #define IWL_INVALID_VALUE -1
@ -165,47 +142,22 @@ enum {
#define IWL_LEGACY_SWITCH_ANTENNA1 0 #define IWL_LEGACY_SWITCH_ANTENNA1 0
#define IWL_LEGACY_SWITCH_ANTENNA2 1 #define IWL_LEGACY_SWITCH_ANTENNA2 1
#define IWL_LEGACY_SWITCH_SISO 2 #define IWL_LEGACY_SWITCH_SISO 2
#define IWL_LEGACY_SWITCH_MIMO2_AB 3 #define IWL_LEGACY_SWITCH_MIMO2 3
#define IWL_LEGACY_SWITCH_MIMO2_AC 4
#define IWL_LEGACY_SWITCH_MIMO2_BC 5
#define IWL_LEGACY_SWITCH_MIMO3_ABC 6
/* possible actions when in siso mode */ /* possible actions when in siso mode */
#define IWL_SISO_SWITCH_ANTENNA1 0 #define IWL_SISO_SWITCH_ANTENNA1 0
#define IWL_SISO_SWITCH_ANTENNA2 1 #define IWL_SISO_SWITCH_ANTENNA2 1
#define IWL_SISO_SWITCH_MIMO2_AB 2 #define IWL_SISO_SWITCH_MIMO2 2
#define IWL_SISO_SWITCH_MIMO2_AC 3 #define IWL_SISO_SWITCH_GI 3
#define IWL_SISO_SWITCH_MIMO2_BC 4
#define IWL_SISO_SWITCH_GI 5
#define IWL_SISO_SWITCH_MIMO3_ABC 6
/* possible actions when in mimo mode */ /* possible actions when in mimo mode */
#define IWL_MIMO2_SWITCH_ANTENNA1 0 #define IWL_MIMO2_SWITCH_ANTENNA1 0
#define IWL_MIMO2_SWITCH_ANTENNA2 1 #define IWL_MIMO2_SWITCH_ANTENNA2 1
#define IWL_MIMO2_SWITCH_SISO_A 2 #define IWL_MIMO2_SWITCH_SISO_A 2
#define IWL_MIMO2_SWITCH_SISO_B 3 #define IWL_MIMO2_SWITCH_SISO_B 3
#define IWL_MIMO2_SWITCH_SISO_C 4 #define IWL_MIMO2_SWITCH_GI 4
#define IWL_MIMO2_SWITCH_GI 5
#define IWL_MIMO2_SWITCH_MIMO3_ABC 6
#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
/* possible actions when in mimo3 mode */
#define IWL_MIMO3_SWITCH_ANTENNA1 0
#define IWL_MIMO3_SWITCH_ANTENNA2 1
#define IWL_MIMO3_SWITCH_SISO_A 2
#define IWL_MIMO3_SWITCH_SISO_B 3
#define IWL_MIMO3_SWITCH_SISO_C 4
#define IWL_MIMO3_SWITCH_MIMO2_AB 5
#define IWL_MIMO3_SWITCH_MIMO2_AC 6
#define IWL_MIMO3_SWITCH_MIMO2_BC 7
#define IWL_MIMO3_SWITCH_GI 8
#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
/*FIXME:RS:add possible actions for MIMO3*/
#define IWL_ACTION_LIMIT 3 /* # possible actions */ #define IWL_ACTION_LIMIT 3 /* # possible actions */
@ -240,15 +192,13 @@ enum iwl_table_type {
LQ_A, LQ_A,
LQ_SISO, /* high-throughput types */ LQ_SISO, /* high-throughput types */
LQ_MIMO2, LQ_MIMO2,
LQ_MIMO3,
LQ_MAX, LQ_MAX,
}; };
#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
#define is_siso(tbl) ((tbl) == LQ_SISO) #define is_siso(tbl) ((tbl) == LQ_SISO)
#define is_mimo2(tbl) ((tbl) == LQ_MIMO2) #define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
#define is_mimo3(tbl) ((tbl) == LQ_MIMO3) #define is_mimo(tbl) is_mimo2(tbl)
#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
#define is_a_band(tbl) ((tbl) == LQ_A) #define is_a_band(tbl) ((tbl) == LQ_A)
#define is_g_and(tbl) ((tbl) == LQ_G) #define is_g_and(tbl) ((tbl) == LQ_G)
@ -320,7 +270,6 @@ struct iwl_lq_sta {
u16 active_legacy_rate; u16 active_legacy_rate;
u16 active_siso_rate; u16 active_siso_rate;
u16 active_mimo2_rate; u16 active_mimo2_rate;
u16 active_mimo3_rate;
s8 max_rate_idx; /* Max rate set by user */ s8 max_rate_idx; /* Max rate set by user */
u8 missed_rate_counter; u8 missed_rate_counter;
@ -330,7 +279,6 @@ struct iwl_lq_sta {
#ifdef CONFIG_MAC80211_DEBUGFS #ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file; struct dentry *rs_sta_dbgfs_scale_table_file;
struct dentry *rs_sta_dbgfs_stats_table_file; struct dentry *rs_sta_dbgfs_stats_table_file;
struct dentry *rs_sta_dbgfs_rate_scale_data_file;
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file; struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
u32 dbg_fixed_rate; u32 dbg_fixed_rate;
#endif #endif


@ -396,11 +396,62 @@ static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx)); memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
} }
struct iwl_mvm_stat_data {
struct iwl_notif_statistics *stats;
struct iwl_mvm *mvm;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
struct iwl_mvm_stat_data *data = _data;
struct iwl_notif_statistics *stats = data->stats;
struct iwl_mvm *mvm = data->mvm;
int sig = -stats->general.beacon_filter_average_energy;
int last_event;
int thold = vif->bss_conf.cqm_rssi_thold;
int hyst = vif->bss_conf.cqm_rssi_hyst;
u16 id = le32_to_cpu(stats->rx.general.mac_id);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
if (mvmvif->id != id)
return;
if (vif->type != NL80211_IFTYPE_STATION)
return;
mvmvif->bf_data.ave_beacon_signal = sig;
if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
return;
/* CQM Notification */
last_event = mvmvif->bf_data.last_cqm_event;
if (thold && sig < thold && (last_event == 0 ||
sig < last_event - hyst)) {
mvmvif->bf_data.last_cqm_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
sig);
ieee80211_cqm_rssi_notify(
vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
GFP_KERNEL);
} else if (sig > thold &&
(last_event == 0 || sig > last_event + hyst)) {
mvmvif->bf_data.last_cqm_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
sig);
ieee80211_cqm_rssi_notify(
vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
GFP_KERNEL);
}
}
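
The threshold/hysteresis logic in the new iterator is the usual CQM pattern: report a "low" event when the averaged beacon signal drops below cqm_rssi_thold by more than the hysteresis since the last report, and a "high" event when it recovers. A minimal userspace sketch of just that decision (the driver context and mac80211 calls are left out; names here are invented, only the comparison mirrors the iterator above):

#include <stdio.h>

enum cqm_event { CQM_NONE, CQM_LOW, CQM_HIGH };

static enum cqm_event cqm_check(int sig, int thold, int hyst, int *last_event)
{
	if (thold && sig < thold &&
	    (*last_event == 0 || sig < *last_event - hyst)) {
		*last_event = sig;
		return CQM_LOW;
	}
	if (sig > thold &&
	    (*last_event == 0 || sig > *last_event + hyst)) {
		*last_event = sig;
		return CQM_HIGH;
	}
	return CQM_NONE;
}

int main(void)
{
	int last = 0;

	/* beacon average energy against a -80 dBm threshold, 3 dB hysteresis */
	printf("%d\n", cqm_check(-82, -80, 3, &last));	/* 1: low event   */
	printf("%d\n", cqm_check(-81, -80, 3, &last));	/* 0: within hyst */
	printf("%d\n", cqm_check(-76, -80, 3, &last));	/* 2: high event  */
	return 0;
}
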
/* /*
* iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler * iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
* *
* TODO: This handler is implemented partially. * TODO: This handler is implemented partially.
* It only gets the NIC's temperature.
*/ */
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm, int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb, struct iwl_rx_cmd_buffer *rxb,
@ -409,6 +460,10 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_notif_statistics *stats = (void *)&pkt->data; struct iwl_notif_statistics *stats = (void *)&pkt->data;
struct mvm_statistics_general_common *common = &stats->general.common; struct mvm_statistics_general_common *common = &stats->general.common;
struct iwl_mvm_stat_data data = {
.stats = stats,
.mvm = mvm,
};
if (mvm->temperature != le32_to_cpu(common->temperature)) { if (mvm->temperature != le32_to_cpu(common->temperature)) {
mvm->temperature = le32_to_cpu(common->temperature); mvm->temperature = le32_to_cpu(common->temperature);
@ -416,5 +471,9 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
} }
iwl_mvm_update_rx_statistics(mvm, stats); iwl_mvm_update_rx_statistics(mvm, stats);
ieee80211_iterate_active_interfaces(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_stat_iterator,
&data);
return 0; return 0;
} }


@ -165,7 +165,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
WARN_ONCE(!le32_to_cpu(notif->status), WARN_ONCE(!le32_to_cpu(notif->status),
"Failed to schedule time event\n"); "Failed to schedule time event\n");
if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) { if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
IWL_DEBUG_TE(mvm, IWL_DEBUG_TE(mvm,
"TE ended - current time %lu, estimated end %lu\n", "TE ended - current time %lu, estimated end %lu\n",
jiffies, te_data->end_jiffies); jiffies, te_data->end_jiffies);
@ -188,7 +188,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
} }
iwl_mvm_te_clear_data(mvm, te_data); iwl_mvm_te_clear_data(mvm, te_data);
} else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) { } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
te_data->running = true; te_data->running = true;
te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
@ -255,10 +255,67 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
return true; return true;
} }
/* used to convert from time event API v2 to v1 */
#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
TE_V2_EVENT_SOCIOPATHIC)
static inline u16 te_v2_get_notify(__le16 policy)
{
return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
}
static inline u16 te_v2_get_dep_policy(__le16 policy)
{
return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
TE_V2_PLACEMENT_POS;
}
static inline u16 te_v2_get_absence(__le16 policy)
{
return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
}
static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
struct iwl_time_event_cmd_v1 *cmd_v1)
{
cmd_v1->id_and_color = cmd_v2->id_and_color;
cmd_v1->action = cmd_v2->action;
cmd_v1->id = cmd_v2->id;
cmd_v1->apply_time = cmd_v2->apply_time;
cmd_v1->max_delay = cmd_v2->max_delay;
cmd_v1->depends_on = cmd_v2->depends_on;
cmd_v1->interval = cmd_v2->interval;
cmd_v1->duration = cmd_v2->duration;
if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
else
cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
cmd_v1->interval_reciprocal = 0; /* unused */
cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
}
static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
const struct iwl_time_event_cmd_v2 *cmd)
{
struct iwl_time_event_cmd_v1 cmd_v1;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
sizeof(*cmd), cmd);
iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
sizeof(cmd_v1), &cmd_v1);
}
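
The helpers above pull the three v1 fields (notify, dep_policy, is_present) back out of the single 16-bit v2 policy word, and iwl_mvm_send_time_event_cmd() only performs that down-conversion when the firmware does not advertise the v2 time-event API. A toy sketch of the same split, with invented bit positions standing in for the real TE_V2_* masks:

#include <stdio.h>

#define NOTIF_MSK	0x00ffu		/* hypothetical: bits 0-7  */
#define DEP_MSK		0x0700u		/* hypothetical: bits 8-10 */
#define DEP_POS		8
#define ABSENCE		0x8000u		/* hypothetical: bit 15    */

struct v1_policy {
	unsigned int notify;
	unsigned int dep_policy;
	unsigned int is_present;
};

static struct v1_policy split_policy(unsigned int v2_policy)
{
	struct v1_policy p = {
		.notify		= v2_policy & NOTIF_MSK,
		.dep_policy	= (v2_policy & DEP_MSK) >> DEP_POS,
		.is_present	= !(v2_policy & ABSENCE),
	};
	return p;
}

int main(void)
{
	/* one dependency bit set plus two notification bits */
	struct v1_policy p = split_policy(0x0103);

	printf("notify=%u dep=%u present=%u\n",
	       p.notify, p.dep_policy, p.is_present);	/* notify=3 dep=1 present=1 */
	return 0;
}
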
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm, static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct iwl_mvm_time_event_data *te_data, struct iwl_mvm_time_event_data *te_data,
struct iwl_time_event_cmd *te_cmd) struct iwl_time_event_cmd_v2 *te_cmd)
{ {
static const u8 time_event_response[] = { TIME_EVENT_CMD }; static const u8 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event; struct iwl_notification_wait wait_time_event;
@ -294,8 +351,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response), ARRAY_SIZE(time_event_response),
iwl_mvm_time_event_response, te_data); iwl_mvm_time_event_response, te_data);
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC, ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
sizeof(*te_cmd), te_cmd);
if (ret) { if (ret) {
IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret); IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_time_event); iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@ -322,7 +378,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
struct iwl_time_event_cmd time_cmd = {}; struct iwl_time_event_cmd_v2 time_cmd = {};
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
@ -356,17 +412,14 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
time_cmd.apply_time = time_cmd.apply_time =
cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG)); cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
time_cmd.dep_policy = TE_INDEPENDENT; time_cmd.max_frags = TE_V2_FRAG_NONE;
time_cmd.is_present = cpu_to_le32(1);
time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
time_cmd.max_delay = cpu_to_le32(500); time_cmd.max_delay = cpu_to_le32(500);
/* TODO: why do we need to set interval = bi if it is not periodic? */ /* TODO: why do we need to set interval = bi if it is not periodic? */
time_cmd.interval = cpu_to_le32(1); time_cmd.interval = cpu_to_le32(1);
time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
time_cmd.duration = cpu_to_le32(duration); time_cmd.duration = cpu_to_le32(duration);
time_cmd.repeat = cpu_to_le32(1); time_cmd.repeat = 1;
time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
TE_NOTIF_HOST_EVENT_END); TE_V2_NOTIF_HOST_EVENT_END);
iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
} }
@ -380,7 +433,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif, struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data) struct iwl_mvm_time_event_data *te_data)
{ {
struct iwl_time_event_cmd time_cmd = {}; struct iwl_time_event_cmd_v2 time_cmd = {};
u32 id, uid; u32 id, uid;
int ret; int ret;
@ -417,8 +470,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)); cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id)); IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC, ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
sizeof(time_cmd), &time_cmd);
if (WARN_ON(ret)) if (WARN_ON(ret))
return; return;
} }
@ -438,7 +490,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{ {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data; struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
struct iwl_time_event_cmd time_cmd = {}; struct iwl_time_event_cmd_v2 time_cmd = {};
lockdep_assert_held(&mvm->mutex); lockdep_assert_held(&mvm->mutex);
if (te_data->running) { if (te_data->running) {
@ -469,8 +521,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
} }
time_cmd.apply_time = cpu_to_le32(0); time_cmd.apply_time = cpu_to_le32(0);
time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
time_cmd.is_present = cpu_to_le32(1);
time_cmd.interval = cpu_to_le32(1); time_cmd.interval = cpu_to_le32(1);
/* /*
@ -479,12 +529,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* scheduled. To improve the chances of it being scheduled, allow them * scheduled. To improve the chances of it being scheduled, allow them
* to be fragmented, and in addition allow them to be delayed. * to be fragmented, and in addition allow them to be delayed.
*/ */
time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20); time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2)); time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration)); time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
time_cmd.repeat = cpu_to_le32(1); time_cmd.repeat = 1;
time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START | time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
TE_NOTIF_HOST_EVENT_END); TE_V2_NOTIF_HOST_EVENT_END);
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
} }


@ -325,15 +325,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret; int ret;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg); iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
if (iwl_trans == NULL) if (IS_ERR(iwl_trans))
return -ENOMEM; return PTR_ERR(iwl_trans);
pci_set_drvdata(pdev, iwl_trans); pci_set_drvdata(pdev, iwl_trans);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
trans_pcie->drv = iwl_drv_start(iwl_trans, cfg); trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
if (IS_ERR_OR_NULL(trans_pcie->drv)) { if (IS_ERR(trans_pcie->drv)) {
ret = PTR_ERR(trans_pcie->drv); ret = PTR_ERR(trans_pcie->drv);
goto out_free_trans; goto out_free_trans;
} }
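
The probe path now expects iwl_trans_pcie_alloc() to return an ERR_PTR-encoded errno instead of NULL (the matching change in the allocator appears further down), so the real failure reason propagates out of probe. A small userspace re-creation of that convention, with the <linux/err.h> macros re-implemented locally purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* toy allocator: encode the errno in the returned pointer on failure */
static void *alloc_trans(int fail)
{
	void *t;

	if (fail)
		return ERR_PTR(-ENOMEM);
	t = malloc(16);
	if (!t)
		return ERR_PTR(-ENOMEM);
	return t;
}

int main(void)
{
	void *t = alloc_trans(1);

	if (IS_ERR(t)) {
		printf("probe failed: %ld\n", PTR_ERR(t));	/* prints -12 */
		return (int)-PTR_ERR(t);
	}
	free(t);
	return 0;
}
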


@ -112,15 +112,16 @@
*/ */
static int iwl_rxq_space(const struct iwl_rxq *rxq) static int iwl_rxq_space(const struct iwl_rxq *rxq)
{ {
int s = rxq->read - rxq->write; /* Make sure RX_QUEUE_SIZE is a power of 2 */
BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
if (s <= 0) /*
s += RX_QUEUE_SIZE; * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
/* keep some buffer to not confuse full and empty queue */ * between empty and completely full queues.
s -= 2; * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
if (s < 0) * defined for negative dividends.
s = 0; */
return s; return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
} }
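
The rewrite relies on RX_QUEUE_SIZE being a power of two: masking with (RX_QUEUE_SIZE - 1) is then the same as taking the index difference modulo the queue size, and reserving one slot keeps a full queue distinguishable from an empty one. A quick standalone check of that arithmetic, with toy indices rather than driver state:

#include <stdio.h>

#define QUEUE_SIZE 256		/* must be a power of two */

static unsigned int queue_space(unsigned int read, unsigned int write)
{
	return (read - write - 1) & (QUEUE_SIZE - 1);
}

int main(void)
{
	printf("%u\n", queue_space(0, 0));	/* empty: 255 free (one slot reserved) */
	printf("%u\n", queue_space(10, 9));	/* write one behind read: full, 0 free */
	printf("%u\n", queue_space(5, 200));	/* wrapped indices: 60 free */
	return 0;
}
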
/* /*
@ -1128,6 +1129,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
struct iwl_trans *trans = data; struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta, inta_mask; u32 inta, inta_mask;
irqreturn_t ret = IRQ_NONE;
lockdep_assert_held(&trans_pcie->irq_lock); lockdep_assert_held(&trans_pcie->irq_lock);
@ -1176,10 +1178,8 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
/* the thread will service interrupts and re-enable them */ /* the thread will service interrupts and re-enable them */
if (likely(inta)) if (likely(inta))
return IRQ_WAKE_THREAD; return IRQ_WAKE_THREAD;
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta) ret = IRQ_HANDLED;
iwl_enable_interrupts(trans);
return IRQ_HANDLED;
none: none:
/* re-enable interrupts here since we don't have anything to service. */ /* re-enable interrupts here since we don't have anything to service. */
@ -1188,7 +1188,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
!trans_pcie->inta) !trans_pcie->inta)
iwl_enable_interrupts(trans); iwl_enable_interrupts(trans);
return IRQ_NONE; return ret;
} }
/* interrupt handler using ict table, with this interrupt driver will /* interrupt handler using ict table, with this interrupt driver will
@ -1207,6 +1207,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
u32 val = 0; u32 val = 0;
u32 read; u32 read;
unsigned long flags; unsigned long flags;
irqreturn_t ret = IRQ_NONE;
if (!trans) if (!trans)
return IRQ_NONE; return IRQ_NONE;
@ -1219,7 +1220,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
* use legacy interrupt. * use legacy interrupt.
*/ */
if (unlikely(!trans_pcie->use_ict)) { if (unlikely(!trans_pcie->use_ict)) {
irqreturn_t ret = iwl_pcie_isr(irq, data); ret = iwl_pcie_isr(irq, data);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return ret; return ret;
} }
@ -1288,17 +1289,9 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
if (likely(inta)) { if (likely(inta)) {
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_WAKE_THREAD; return IRQ_WAKE_THREAD;
} else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
!trans_pcie->inta) {
/* Allow interrupt if was disabled by this handler and
* no tasklet was schedules, We should not enable interrupt,
* tasklet will enable it.
*/
iwl_enable_interrupts(trans);
} }
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); ret = IRQ_HANDLED;
return IRQ_HANDLED;
none: none:
/* re-enable interrupts here since we don't have anything to service. /* re-enable interrupts here since we don't have anything to service.
@ -1309,5 +1302,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
iwl_enable_interrupts(trans); iwl_enable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_NONE; return ret;
} }


@ -1386,9 +1386,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans = kzalloc(sizeof(struct iwl_trans) + trans = kzalloc(sizeof(struct iwl_trans) +
sizeof(struct iwl_trans_pcie), GFP_KERNEL); sizeof(struct iwl_trans_pcie), GFP_KERNEL);
if (!trans) {
if (!trans) err = -ENOMEM;
return NULL; goto out;
}
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -1411,10 +1412,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
PCIE_LINK_STATE_CLKPM); PCIE_LINK_STATE_CLKPM);
} }
if (pci_enable_device(pdev)) { err = pci_enable_device(pdev);
err = -ENODEV; if (err)
goto out_no_pci; goto out_no_pci;
}
pci_set_master(pdev); pci_set_master(pdev);
@ -1483,17 +1483,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
SLAB_HWCACHE_ALIGN, SLAB_HWCACHE_ALIGN,
NULL); NULL);
if (!trans->dev_cmd_pool) if (!trans->dev_cmd_pool) {
err = -ENOMEM;
goto out_pci_disable_msi; goto out_pci_disable_msi;
}
trans_pcie->inta_mask = CSR_INI_SET_MASK; trans_pcie->inta_mask = CSR_INI_SET_MASK;
if (iwl_pcie_alloc_ict(trans)) if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool; goto out_free_cmd_pool;
if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict, err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
iwl_pcie_irq_handler, iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans)) { IRQF_SHARED, DRV_NAME, trans);
if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict; goto out_free_ict;
} }
@ -1512,5 +1515,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_disable_device(pdev); pci_disable_device(pdev);
out_no_pci: out_no_pci:
kfree(trans); kfree(trans);
return NULL; out:
return ERR_PTR(err);
} }


@ -65,18 +65,30 @@
***************************************************/ ***************************************************/
static int iwl_queue_space(const struct iwl_queue *q) static int iwl_queue_space(const struct iwl_queue *q)
{ {
int s = q->read_ptr - q->write_ptr; unsigned int max;
unsigned int used;
if (q->read_ptr > q->write_ptr) /*
s -= q->n_bd; * To avoid ambiguity between empty and completely full queues, there
* should always be less than q->n_bd elements in the queue.
* If q->n_window is smaller than q->n_bd, there is no need to reserve
* any queue entries for this purpose.
*/
if (q->n_window < q->n_bd)
max = q->n_window;
else
max = q->n_bd - 1;
if (s <= 0) /*
s += q->n_window; * q->n_bd is a power of 2, so the following is equivalent to modulo by
/* keep some reserve to not confuse empty and full situations */ * q->n_bd and is well defined for negative dividends.
s -= 2; */
if (s < 0) used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
s = 0;
return s; if (WARN_ON(used > max))
return 0;
return max - used;
} }
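
The TX variant above adds one refinement over the RX helper earlier: the ring has n_bd descriptors, but at most n_window commands may be outstanding, so the free-space ceiling is min(n_window, n_bd - 1). A companion sketch with toy numbers, assuming n_bd is a power of two as in the driver:

#include <stdio.h>

static unsigned int txq_space(unsigned int n_bd, unsigned int n_window,
			      unsigned int read, unsigned int write)
{
	unsigned int max = (n_window < n_bd) ? n_window : n_bd - 1;
	unsigned int used = (write - read) & (n_bd - 1);	/* n_bd is a power of two */

	return (used > max) ? 0 : max - used;
}

int main(void)
{
	/* 256-entry ring, 64-entry window, 10 descriptors in flight */
	printf("%u\n", txq_space(256, 64, 100, 110));	/* 54 */
	return 0;
}
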
/* /*
@ -826,7 +838,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
sizeof(struct iwl_txq), GFP_KERNEL); sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq) { if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n"); IWL_ERR(trans, "Not enough memory for txq\n");
ret = ENOMEM; ret = -ENOMEM;
goto error; goto error;
} }