Merge branch 'wireless-next-2.6' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-2.6
commit f51f87a091
@@ -171,10 +171,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)

 static struct iwl_lib_ops iwl1000_lib = {
 	.set_hw_params = iwl1000_hw_set_hw_params,
-	.txq_set_sched = iwlagn_txq_set_sched,
-	.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-	.txq_free_tfd = iwl_hw_txq_free_tfd,
-	.txq_init = iwl_hw_tx_queue_init,
 	.rx_handler_setup = iwlagn_rx_handler_setup,
 	.setup_deferred_work = iwlagn_setup_deferred_work,
 	.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
@@ -195,9 +195,9 @@ static int iwl2030_hw_channel_switch(struct iwl_priv *priv,
 	struct ieee80211_vif *vif = ctx->vif;
 	struct iwl_host_cmd hcmd = {
 		.id = REPLY_CHANNEL_SWITCH,
-		.len = sizeof(cmd),
+		.len = { sizeof(cmd), },
 		.flags = CMD_SYNC,
-		.data = &cmd,
+		.data = { &cmd, },
 	};

 	cmd.band = priv->band == IEEE80211_BAND_2GHZ;
@@ -252,10 +252,6 @@ static int iwl2030_hw_channel_switch(struct iwl_priv *priv,

 static struct iwl_lib_ops iwl2000_lib = {
 	.set_hw_params = iwl2000_hw_set_hw_params,
-	.txq_set_sched = iwlagn_txq_set_sched,
-	.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
-	.txq_free_tfd = iwl_hw_txq_free_tfd,
-	.txq_init = iwl_hw_tx_queue_init,
 	.rx_handler_setup = iwlagn_rx_handler_setup,
 	.setup_deferred_work = iwlagn_bt_setup_deferred_work,
 	.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
@ -282,9 +282,9 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
|
|||
struct ieee80211_vif *vif = ctx->vif;
|
||||
struct iwl_host_cmd hcmd = {
|
||||
.id = REPLY_CHANNEL_SWITCH,
|
||||
.len = sizeof(cmd),
|
||||
.len = { sizeof(cmd), },
|
||||
.flags = CMD_SYNC,
|
||||
.data = &cmd,
|
||||
.data = { &cmd, },
|
||||
};
|
||||
|
||||
cmd.band = priv->band == IEEE80211_BAND_2GHZ;
|
||||
|
@ -339,10 +339,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
|
|||
|
||||
static struct iwl_lib_ops iwl5000_lib = {
|
||||
.set_hw_params = iwl5000_hw_set_hw_params,
|
||||
.txq_set_sched = iwlagn_txq_set_sched,
|
||||
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
|
||||
.txq_free_tfd = iwl_hw_txq_free_tfd,
|
||||
.txq_init = iwl_hw_tx_queue_init,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
|
@ -374,10 +370,6 @@ static struct iwl_lib_ops iwl5000_lib = {
|
|||
|
||||
static struct iwl_lib_ops iwl5150_lib = {
|
||||
.set_hw_params = iwl5150_hw_set_hw_params,
|
||||
.txq_set_sched = iwlagn_txq_set_sched,
|
||||
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
|
||||
.txq_free_tfd = iwl_hw_txq_free_tfd,
|
||||
.txq_init = iwl_hw_tx_queue_init,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
|
|
|
@ -221,9 +221,9 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
|
|||
struct ieee80211_vif *vif = ctx->vif;
|
||||
struct iwl_host_cmd hcmd = {
|
||||
.id = REPLY_CHANNEL_SWITCH,
|
||||
.len = sizeof(cmd),
|
||||
.len = { sizeof(cmd), },
|
||||
.flags = CMD_SYNC,
|
||||
.data = &cmd,
|
||||
.data = { &cmd, },
|
||||
};
|
||||
|
||||
cmd.band = priv->band == IEEE80211_BAND_2GHZ;
|
||||
|
@ -278,10 +278,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
|
|||
|
||||
static struct iwl_lib_ops iwl6000_lib = {
|
||||
.set_hw_params = iwl6000_hw_set_hw_params,
|
||||
.txq_set_sched = iwlagn_txq_set_sched,
|
||||
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
|
||||
.txq_free_tfd = iwl_hw_txq_free_tfd,
|
||||
.txq_init = iwl_hw_tx_queue_init,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
|
@ -314,10 +310,6 @@ static struct iwl_lib_ops iwl6000_lib = {
|
|||
|
||||
static struct iwl_lib_ops iwl6030_lib = {
|
||||
.set_hw_params = iwl6000_hw_set_hw_params,
|
||||
.txq_set_sched = iwlagn_txq_set_sched,
|
||||
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
|
||||
.txq_free_tfd = iwl_hw_txq_free_tfd,
|
||||
.txq_init = iwl_hw_tx_queue_init,
|
||||
.rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
|
||||
|
|
|
@@ -87,14 +87,14 @@ int iwl_send_calib_results(struct iwl_priv *priv)

 	struct iwl_host_cmd hcmd = {
 		.id = REPLY_PHY_CALIBRATION_CMD,
-		.flags = CMD_SIZE_HUGE,
 	};

 	for (i = 0; i < IWL_CALIB_MAX; i++) {
 		if ((BIT(i) & priv->hw_params.calib_init_cfg) &&
 		    priv->calib_results[i].buf) {
-			hcmd.len = priv->calib_results[i].buf_len;
-			hcmd.data = priv->calib_results[i].buf;
+			hcmd.len[0] = priv->calib_results[i].buf_len;
+			hcmd.data[0] = priv->calib_results[i].buf;
+			hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
 			ret = iwl_send_cmd_sync(priv, &hcmd);
 			if (ret) {
 				IWL_ERR(priv, "Error %d iteration %d\n",
@ -456,9 +456,9 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
|
|||
struct iwl_sensitivity_data *data = NULL;
|
||||
struct iwl_host_cmd cmd_out = {
|
||||
.id = SENSITIVITY_CMD,
|
||||
.len = sizeof(struct iwl_sensitivity_cmd),
|
||||
.len = { sizeof(struct iwl_sensitivity_cmd), },
|
||||
.flags = CMD_ASYNC,
|
||||
.data = &cmd,
|
||||
.data = { &cmd, },
|
||||
};
|
||||
|
||||
data = &(priv->sensitivity_data);
|
||||
|
@ -491,9 +491,9 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
|
|||
struct iwl_sensitivity_data *data = NULL;
|
||||
struct iwl_host_cmd cmd_out = {
|
||||
.id = SENSITIVITY_CMD,
|
||||
.len = sizeof(struct iwl_enhance_sensitivity_cmd),
|
||||
.len = { sizeof(struct iwl_enhance_sensitivity_cmd), },
|
||||
.flags = CMD_ASYNC,
|
||||
.data = &cmd,
|
||||
.data = { &cmd, },
|
||||
};
|
||||
|
||||
data = &(priv->sensitivity_data);
|
||||
|
|
|
@@ -1140,8 +1140,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 {
 	struct iwl_host_cmd cmd = {
 		.id = REPLY_SCAN_CMD,
-		.len = sizeof(struct iwl_scan_cmd),
-		.flags = CMD_SIZE_HUGE,
+		.len = { sizeof(struct iwl_scan_cmd), },
 	};
 	struct iwl_scan_cmd *scan;
 	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

@@ -1425,10 +1424,11 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
 		return -EIO;
 	}

-	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
+	cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
 	       scan->channel_count * sizeof(struct iwl_scan_channel);
-	cmd.data = scan;
-	scan->len = cpu_to_le16(cmd.len);
+	cmd.data[0] = scan;
+	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+	scan->len = cpu_to_le16(cmd.len[0]);

 	/* set scan bit here for PAN params */
 	set_bit(STATUS_SCAN_HW, &priv->status);
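The scan hunk above also shows how the total command length is now accumulated in `cmd.len[0]`: the fixed scan command, plus the embedded probe-request TX command, plus one channel descriptor per scanned channel. The snippet below is a stand-alone arithmetic sketch of that computation; the byte sizes are hypothetical placeholders, not the driver's real `sizeof()` values.

#include <stdio.h>

/*
 * Hypothetical sizes, for illustration only -- the real values come from
 * sizeof(struct iwl_scan_cmd), the probe request tx_cmd length and
 * sizeof(struct iwl_scan_channel) in the driver.
 */
#define SCAN_CMD_FIXED_LEN 72u
#define TX_CMD_LEN         40u
#define SCAN_CHANNEL_LEN   20u

int main(void)
{
	unsigned channel_count = 14;          /* e.g. a full 2.4 GHz sweep */
	unsigned len = SCAN_CMD_FIXED_LEN;    /* what cmd.len[0] starts at */

	/* mirrors: cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
	 *          scan->channel_count * sizeof(struct iwl_scan_channel); */
	len += TX_CMD_LEN + channel_count * SCAN_CHANNEL_LEN;

	printf("total scan command length: %u bytes\n", len);
	return 0;
}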
@ -1520,9 +1520,9 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
|
|||
struct iwl_txfifo_flush_cmd flush_cmd;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = REPLY_TXFIFO_FLUSH,
|
||||
.len = sizeof(struct iwl_txfifo_flush_cmd),
|
||||
.len = { sizeof(struct iwl_txfifo_flush_cmd), },
|
||||
.flags = CMD_SYNC,
|
||||
.data = &flush_cmd,
|
||||
.data = { &flush_cmd, },
|
||||
};
|
||||
|
||||
might_sleep();
|
||||
|
|
|
@ -335,6 +335,32 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
|
|||
return tid;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_MAC80211_DEBUGFS) || defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
|
||||
static void rs_program_fix_rate(struct iwl_priv *priv,
|
||||
struct iwl_lq_sta *lq_sta)
|
||||
{
|
||||
struct iwl_station_priv *sta_priv =
|
||||
container_of(lq_sta, struct iwl_station_priv, lq_sta);
|
||||
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
||||
lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
|
||||
lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
|
||||
lq_sta->dbg_fixed_rate = priv->dbg_fixed_rate;
|
||||
|
||||
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
|
||||
lq_sta->lq.sta_id, priv->dbg_fixed_rate);
|
||||
|
||||
if (priv->dbg_fixed_rate) {
|
||||
rs_fill_link_cmd(NULL, lq_sta, priv->dbg_fixed_rate);
|
||||
iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
|
||||
false);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
get the traffic load value for tid
|
||||
*/
|
||||
|
@ -1046,7 +1072,10 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
|
|||
/* See if there's a better rate or modulation mode to try. */
|
||||
if (sta && sta->supp_rates[sband->band])
|
||||
rs_rate_scale_perform(priv, skb, sta, lq_sta);
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
if (priv->dbg_fixed_rate != lq_sta->dbg_fixed_rate)
|
||||
rs_program_fix_rate(priv, lq_sta);
|
||||
#endif
|
||||
if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
|
||||
rs_bt_update_lq(priv, ctx, lq_sta);
|
||||
}
|
||||
|
@ -2170,7 +2199,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
|
|||
* setup rate table in uCode
|
||||
* return rate_n_flags as used in the table
|
||||
*/
|
||||
static u32 rs_update_rate_tbl(struct iwl_priv *priv,
|
||||
static void rs_update_rate_tbl(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
struct iwl_lq_sta *lq_sta,
|
||||
struct iwl_scale_tbl_info *tbl,
|
||||
|
@ -2182,8 +2211,6 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
|
|||
rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
|
||||
rs_fill_link_cmd(priv, lq_sta, rate);
|
||||
iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
|
||||
|
||||
return rate;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2212,7 +2239,6 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
|
|||
u8 update_lq = 0;
|
||||
struct iwl_scale_tbl_info *tbl, *tbl1;
|
||||
u16 rate_scale_index_msk = 0;
|
||||
u32 rate;
|
||||
u8 is_green = 0;
|
||||
u8 active_tbl = 0;
|
||||
u8 done_search = 0;
|
||||
|
@ -2299,8 +2325,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
|
|||
tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
|
||||
/* get "active" rate info */
|
||||
index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
|
||||
rate = rs_update_rate_tbl(priv, ctx, lq_sta,
|
||||
tbl, index, is_green);
|
||||
rs_update_rate_tbl(priv, ctx, lq_sta, tbl,
|
||||
index, is_green);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
@ -2541,8 +2567,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
|
|||
lq_update:
|
||||
/* Replace uCode's rate table for the destination station. */
|
||||
if (update_lq)
|
||||
rate = rs_update_rate_tbl(priv, ctx, lq_sta,
|
||||
tbl, index, is_green);
|
||||
rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green);
|
||||
|
||||
if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
|
||||
/* Should we stay with this modulation mode,
|
||||
|
@ -2871,6 +2896,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
|
|||
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
|
||||
lq_sta->is_agg = 0;
|
||||
|
||||
priv->dbg_fixed_rate = 0;
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
lq_sta->dbg_fixed_rate = 0;
|
||||
#endif
|
||||
|
@ -3045,7 +3071,6 @@ static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
|
|||
IWL_DEBUG_RATE(priv, "leave\n");
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
static int open_file_generic(struct inode *inode, struct file *file)
|
||||
{
|
||||
|
@ -3070,6 +3095,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
|
|||
IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
|
||||
} else {
|
||||
lq_sta->dbg_fixed_rate = 0;
|
||||
priv->dbg_fixed_rate = 0;
|
||||
IWL_ERR(priv,
|
||||
"Invalid antenna selection 0x%X, Valid is 0x%X\n",
|
||||
ant_sel_tx, valid_tx_ant);
|
||||
|
@ -3088,9 +3114,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
|
|||
char buf[64];
|
||||
size_t buf_size;
|
||||
u32 parsed_rate;
|
||||
struct iwl_station_priv *sta_priv =
|
||||
container_of(lq_sta, struct iwl_station_priv, lq_sta);
|
||||
struct iwl_rxon_context *ctx = sta_priv->common.ctx;
|
||||
|
||||
|
||||
priv = lq_sta->drv;
|
||||
memset(buf, 0, sizeof(buf));
|
||||
|
@ -3099,23 +3123,11 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
|
|||
return -EFAULT;
|
||||
|
||||
if (sscanf(buf, "%x", &parsed_rate) == 1)
|
||||
lq_sta->dbg_fixed_rate = parsed_rate;
|
||||
priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = parsed_rate;
|
||||
else
|
||||
lq_sta->dbg_fixed_rate = 0;
|
||||
priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = 0;
|
||||
|
||||
lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
|
||||
lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
|
||||
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
|
||||
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
|
||||
|
||||
if (lq_sta->dbg_fixed_rate) {
|
||||
rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
|
||||
iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
|
||||
false);
|
||||
}
|
||||
rs_program_fix_rate(priv, lq_sta);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -3143,7 +3155,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
|
|||
lq_sta->total_failed, lq_sta->total_success,
|
||||
lq_sta->active_legacy_rate);
|
||||
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
|
||||
lq_sta->dbg_fixed_rate);
|
||||
priv->dbg_fixed_rate);
|
||||
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
|
||||
(priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
|
||||
(priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
|
||||
|
@ -3254,15 +3266,11 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
|
|||
static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
|
||||
char __user *user_buf, size_t count, loff_t *ppos)
|
||||
{
|
||||
struct iwl_lq_sta *lq_sta = file->private_data;
|
||||
struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
|
||||
char buff[120];
|
||||
int desc = 0;
|
||||
|
||||
struct iwl_lq_sta *lq_sta = file->private_data;
|
||||
struct iwl_priv *priv;
|
||||
struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
|
||||
|
||||
priv = lq_sta->drv;
|
||||
|
||||
if (is_Ht(tbl->lq_type))
|
||||
desc += sprintf(buff+desc,
|
||||
"Bit Rate= %d Mb/s\n",
|
||||
|
|
|
@ -289,7 +289,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
/* cast away the const for active_rxon in this function */
|
||||
struct iwl_rxon_cmd *active = (void *)&ctx->active;
|
||||
bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
|
||||
bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
@ -389,11 +388,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
* AP station must be done after the BSSID is set to correctly
|
||||
* set up filters in the device.
|
||||
*/
|
||||
if ((old_assoc && new_assoc) || !new_assoc) {
|
||||
ret = iwlagn_rxon_disconn(priv, ctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (new_assoc)
|
||||
return iwlagn_rxon_connect(priv, ctx);
|
||||
|
|
|
@ -144,7 +144,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
|
|||
size_t cmd_size = sizeof(struct iwl_wep_cmd);
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = ctx->wep_key_cmd,
|
||||
.data = wep_cmd,
|
||||
.data = { wep_cmd, },
|
||||
.flags = CMD_SYNC,
|
||||
};
|
||||
|
||||
|
@ -172,7 +172,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
|
|||
|
||||
cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
|
||||
|
||||
cmd.len = cmd_size;
|
||||
cmd.len[0] = cmd_size;
|
||||
|
||||
if (not_empty || send_if_empty)
|
||||
return iwl_send_cmd(priv, &cmd);
|
||||
|
|
|
@ -750,12 +750,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
|
|||
spin_unlock(&priv->sta_lock);
|
||||
|
||||
/* Attach buffers to TFD */
|
||||
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
|
||||
txcmd_phys, firstlen, 1, 0);
|
||||
iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
|
||||
if (secondlen > 0)
|
||||
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
|
||||
phys_addr, secondlen,
|
||||
0, 0);
|
||||
iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
|
||||
secondlen, 0);
|
||||
|
||||
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
|
||||
offsetof(struct iwl_tx_cmd, scratch);
|
||||
|
@ -911,7 +909,7 @@ int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
|
|||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Turn off all Tx DMA fifos */
|
||||
priv->cfg->ops->lib->txq_set_sched(priv, 0);
|
||||
iwlagn_txq_set_sched(priv, 0);
|
||||
|
||||
/* Tell NIC where to find the "keep warm" buffer */
|
||||
iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
|
||||
|
@ -949,7 +947,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
|
|||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Turn off all Tx DMA fifos */
|
||||
priv->cfg->ops->lib->txq_set_sched(priv, 0);
|
||||
iwlagn_txq_set_sched(priv, 0);
|
||||
|
||||
/* Tell NIC where to find the "keep warm" buffer */
|
||||
iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
|
||||
|
@ -975,7 +973,7 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
|
|||
/* Turn off all Tx DMA fifos */
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
priv->cfg->ops->lib->txq_set_sched(priv, 0);
|
||||
iwlagn_txq_set_sched(priv, 0);
|
||||
|
||||
/* Stop each Tx DMA channel, and wait for it to be idle */
|
||||
for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
|
||||
|
@ -1258,7 +1256,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
|
|||
|
||||
iwlagn_txq_inval_byte_cnt_tbl(priv, txq);
|
||||
|
||||
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
|
||||
iwlagn_txq_free_tfd(priv, txq);
|
||||
}
|
||||
return nfreed;
|
||||
}
|
||||
|
|
|
@ -217,8 +217,8 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
|
|||
struct iwl_calib_cfg_cmd calib_cfg_cmd;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = CALIBRATION_CFG_CMD,
|
||||
.len = sizeof(struct iwl_calib_cfg_cmd),
|
||||
.data = &calib_cfg_cmd,
|
||||
.len = { sizeof(struct iwl_calib_cfg_cmd), },
|
||||
.data = { &calib_cfg_cmd, },
|
||||
};
|
||||
|
||||
memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
|
||||
|
@ -440,7 +440,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
|
|||
IWL_MASK(0, priv->hw_params.max_txq_num));
|
||||
|
||||
/* Activate all Tx DMA/FIFO channels */
|
||||
priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
|
||||
iwlagn_txq_set_sched(priv, IWL_MASK(0, 7));
|
||||
|
||||
/* map queues to FIFOs */
|
||||
if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
|
||||
|
|
|
@ -134,12 +134,10 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
|
|||
struct iwl_tx_beacon_cmd *tx_beacon_cmd;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = REPLY_TX_BEACON,
|
||||
.flags = CMD_SIZE_HUGE,
|
||||
};
|
||||
u32 frame_size;
|
||||
u32 rate_flags;
|
||||
u32 rate;
|
||||
int err;
|
||||
|
||||
/*
|
||||
* We have to set up the TX command, the TX Beacon command, and the
|
||||
|
@ -156,17 +154,15 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
|
|||
if (WARN_ON(!priv->beacon_skb))
|
||||
return -EINVAL;
|
||||
|
||||
/* Allocate beacon memory */
|
||||
tx_beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd) + priv->beacon_skb->len,
|
||||
GFP_KERNEL);
|
||||
/* Allocate beacon command */
|
||||
if (!priv->beacon_cmd)
|
||||
priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
|
||||
tx_beacon_cmd = priv->beacon_cmd;
|
||||
if (!tx_beacon_cmd)
|
||||
return -ENOMEM;
|
||||
|
||||
frame_size = priv->beacon_skb->len;
|
||||
|
||||
/* Set up TX beacon contents */
|
||||
memcpy(tx_beacon_cmd->frame, priv->beacon_skb->data, frame_size);
|
||||
|
||||
/* Set up TX command fields */
|
||||
tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
|
||||
tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
|
||||
|
@ -175,7 +171,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
|
|||
TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
|
||||
|
||||
/* Set up TX beacon command fields */
|
||||
iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
|
||||
iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
|
||||
frame_size);
|
||||
|
||||
/* Set up packet rate and flags */
|
||||
|
@ -189,164 +185,14 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
|
|||
rate_flags);
|
||||
|
||||
/* Submit command */
|
||||
cmd.len = sizeof(*tx_beacon_cmd) + frame_size;
|
||||
cmd.data = tx_beacon_cmd;
|
||||
cmd.len[0] = sizeof(*tx_beacon_cmd);
|
||||
cmd.data[0] = tx_beacon_cmd;
|
||||
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
|
||||
cmd.len[1] = frame_size;
|
||||
cmd.data[1] = priv->beacon_skb->data;
|
||||
cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
|
||||
|
||||
err = iwl_send_cmd_sync(priv, &cmd);
|
||||
|
||||
/* Free temporary storage */
|
||||
kfree(tx_beacon_cmd);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
|
||||
{
|
||||
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
|
||||
|
||||
dma_addr_t addr = get_unaligned_le32(&tb->lo);
|
||||
if (sizeof(dma_addr_t) > sizeof(u32))
|
||||
addr |=
|
||||
((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
|
||||
{
|
||||
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
|
||||
|
||||
return le16_to_cpu(tb->hi_n_len) >> 4;
|
||||
}
|
||||
|
||||
static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
|
||||
dma_addr_t addr, u16 len)
|
||||
{
|
||||
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
|
||||
u16 hi_n_len = len << 4;
|
||||
|
||||
put_unaligned_le32(addr, &tb->lo);
|
||||
if (sizeof(dma_addr_t) > sizeof(u32))
|
||||
hi_n_len |= ((addr >> 16) >> 16) & 0xF;
|
||||
|
||||
tb->hi_n_len = cpu_to_le16(hi_n_len);
|
||||
|
||||
tfd->num_tbs = idx + 1;
|
||||
}
|
||||
|
||||
static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
|
||||
{
|
||||
return tfd->num_tbs & 0x1f;
|
||||
}
|
||||
|
||||
/**
|
||||
* iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
|
||||
* @priv - driver private data
|
||||
* @txq - tx queue
|
||||
*
|
||||
* Does NOT advance any TFD circular buffer read/write indexes
|
||||
* Does NOT free the TFD itself (which is within circular buffer)
|
||||
*/
|
||||
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
|
||||
{
|
||||
struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
|
||||
struct iwl_tfd *tfd;
|
||||
struct pci_dev *dev = priv->pci_dev;
|
||||
int index = txq->q.read_ptr;
|
||||
int i;
|
||||
int num_tbs;
|
||||
|
||||
tfd = &tfd_tmp[index];
|
||||
|
||||
/* Sanity check on number of chunks */
|
||||
num_tbs = iwl_tfd_get_num_tbs(tfd);
|
||||
|
||||
if (num_tbs >= IWL_NUM_OF_TBS) {
|
||||
IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
|
||||
/* @todo issue fatal error, it is quite serious situation */
|
||||
return;
|
||||
}
|
||||
|
||||
/* Unmap tx_cmd */
|
||||
if (num_tbs)
|
||||
pci_unmap_single(dev,
|
||||
dma_unmap_addr(&txq->meta[index], mapping),
|
||||
dma_unmap_len(&txq->meta[index], len),
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
/* Unmap chunks, if any. */
|
||||
for (i = 1; i < num_tbs; i++)
|
||||
pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
|
||||
iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
|
||||
|
||||
/* free SKB */
|
||||
if (txq->txb) {
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = txq->txb[txq->q.read_ptr].skb;
|
||||
|
||||
/* can be called from irqs-disabled context */
|
||||
if (skb) {
|
||||
dev_kfree_skb_any(skb);
|
||||
txq->txb[txq->q.read_ptr].skb = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
dma_addr_t addr, u16 len,
|
||||
u8 reset, u8 pad)
|
||||
{
|
||||
struct iwl_queue *q;
|
||||
struct iwl_tfd *tfd, *tfd_tmp;
|
||||
u32 num_tbs;
|
||||
|
||||
q = &txq->q;
|
||||
tfd_tmp = (struct iwl_tfd *)txq->tfds;
|
||||
tfd = &tfd_tmp[q->write_ptr];
|
||||
|
||||
if (reset)
|
||||
memset(tfd, 0, sizeof(*tfd));
|
||||
|
||||
num_tbs = iwl_tfd_get_num_tbs(tfd);
|
||||
|
||||
/* Each TFD can point to a maximum 20 Tx buffers */
|
||||
if (num_tbs >= IWL_NUM_OF_TBS) {
|
||||
IWL_ERR(priv, "Error can not send more than %d chunks\n",
|
||||
IWL_NUM_OF_TBS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
|
||||
return -EINVAL;
|
||||
|
||||
if (unlikely(addr & ~IWL_TX_DMA_MASK))
|
||||
IWL_ERR(priv, "Unaligned address = %llx\n",
|
||||
(unsigned long long)addr);
|
||||
|
||||
iwl_tfd_set_tb(tfd, num_tbs, addr, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Tell nic where to find circular buffer of Tx Frame Descriptors for
|
||||
* given Tx queue, and enable the DMA channel used for that queue.
|
||||
*
|
||||
* supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
|
||||
* channels supported in hardware.
|
||||
*/
|
||||
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq)
|
||||
{
|
||||
int txq_id = txq->q.id;
|
||||
|
||||
/* Circular buffer (TFD queue in DRAM) physical base address */
|
||||
iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
|
||||
txq->q.dma_addr >> 8);
|
||||
|
||||
return 0;
|
||||
return iwl_send_cmd_sync(priv, &cmd);
|
||||
}
|
||||
|
||||
static void iwl_bg_beacon_update(struct work_struct *work)
|
||||
|
@ -1776,10 +1622,7 @@ static const char *desc_lookup(u32 num)
|
|||
|
||||
void iwl_dump_nic_error_log(struct iwl_priv *priv)
|
||||
{
|
||||
u32 data2, line;
|
||||
u32 desc, time, count, base, data1;
|
||||
u32 blink1, blink2, ilink1, ilink2;
|
||||
u32 pc, hcmd;
|
||||
u32 base;
|
||||
struct iwl_error_event_table table;
|
||||
|
||||
base = priv->device_pointers.error_event_table;
|
||||
|
@ -1802,37 +1645,40 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
|
|||
|
||||
iwl_read_targ_mem_words(priv, base, &table, sizeof(table));
|
||||
|
||||
count = table.valid;
|
||||
|
||||
if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
|
||||
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
|
||||
IWL_ERR(priv, "Start IWL Error Log Dump:\n");
|
||||
IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
|
||||
priv->status, count);
|
||||
priv->status, table.valid);
|
||||
}
|
||||
|
||||
desc = table.error_id;
|
||||
priv->isr_stats.err_code = desc;
|
||||
pc = table.pc;
|
||||
blink1 = table.blink1;
|
||||
blink2 = table.blink2;
|
||||
ilink1 = table.ilink1;
|
||||
ilink2 = table.ilink2;
|
||||
data1 = table.data1;
|
||||
data2 = table.data2;
|
||||
line = table.line;
|
||||
time = table.tsf_low;
|
||||
hcmd = table.hcmd;
|
||||
priv->isr_stats.err_code = table.error_id;
|
||||
|
||||
trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
|
||||
blink1, blink2, ilink1, ilink2);
|
||||
|
||||
IWL_ERR(priv, "Desc Time "
|
||||
"data1 data2 line\n");
|
||||
IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
|
||||
desc_lookup(desc), desc, time, data1, data2, line);
|
||||
IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
|
||||
IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
|
||||
pc, blink1, blink2, ilink1, ilink2, hcmd);
|
||||
trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low,
|
||||
table.data1, table.data2, table.line,
|
||||
table.blink1, table.blink2, table.ilink1,
|
||||
table.ilink2, table.bcon_time, table.gp1,
|
||||
table.gp2, table.gp3, table.ucode_ver,
|
||||
table.hw_ver, table.brd_ver);
|
||||
IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
|
||||
desc_lookup(table.error_id));
|
||||
IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
|
||||
IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
|
||||
IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
|
||||
IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
|
||||
IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
|
||||
IWL_ERR(priv, "0x%08X | data1\n", table.data1);
|
||||
IWL_ERR(priv, "0x%08X | data2\n", table.data2);
|
||||
IWL_ERR(priv, "0x%08X | line\n", table.line);
|
||||
IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
|
||||
IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
|
||||
IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
|
||||
IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
|
||||
IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
|
||||
IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
|
||||
IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
|
||||
IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
|
||||
IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
|
||||
IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
|
||||
}
|
||||
|
||||
#define EVENT_START_OFFSET (4 * sizeof(u32))
|
||||
|
@ -2114,8 +1960,8 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
|
|||
struct iwl_calib_cfg_cmd calib_cfg_cmd;
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = CALIBRATION_CFG_CMD,
|
||||
.len = sizeof(struct iwl_calib_cfg_cmd),
|
||||
.data = &calib_cfg_cmd,
|
||||
.len = { sizeof(struct iwl_calib_cfg_cmd), },
|
||||
.data = { &calib_cfg_cmd, },
|
||||
};
|
||||
|
||||
memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
|
||||
|
@ -3395,6 +3241,7 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
|
|||
iwlcore_free_geos(priv);
|
||||
iwl_free_channel_map(priv);
|
||||
kfree(priv->scan_cmd);
|
||||
kfree(priv->beacon_cmd);
|
||||
}
|
||||
|
||||
struct ieee80211_ops iwlagn_hw_ops = {
|
||||
|
@ -3812,6 +3659,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
|
|||
*/
|
||||
set_bit(STATUS_EXIT_PENDING, &priv->status);
|
||||
|
||||
iwl_testmode_cleanup(priv);
|
||||
iwl_leds_exit(priv);
|
||||
|
||||
if (priv->mac80211_registered) {
|
||||
|
|
|
@ -191,12 +191,10 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
|
|||
void iwl_setup_rx_handlers(struct iwl_priv *priv);
|
||||
|
||||
/* tx */
|
||||
void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
|
||||
int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
||||
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
|
||||
int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq,
|
||||
dma_addr_t addr, u16 len, u8 reset, u8 pad);
|
||||
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
|
||||
struct iwl_tx_queue *txq);
|
||||
dma_addr_t addr, u16 len, u8 reset);
|
||||
void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
|
||||
struct ieee80211_tx_info *info);
|
||||
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
|
||||
|
@ -345,6 +343,7 @@ extern int iwl_alive_start(struct iwl_priv *priv);
|
|||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len);
|
||||
extern void iwl_testmode_init(struct iwl_priv *priv);
|
||||
extern void iwl_testmode_cleanup(struct iwl_priv *priv);
|
||||
#else
|
||||
static inline
|
||||
int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
|
||||
|
@ -355,6 +354,10 @@ static inline
|
|||
void iwl_testmode_init(struct iwl_priv *priv)
|
||||
{
|
||||
}
|
||||
static inline
|
||||
void iwl_testmode_cleanup(struct iwl_priv *priv)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* __iwl_agn_h__ */
|
||||
|
|
|
@@ -205,7 +205,6 @@ enum {
 #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
 #define SEQ_TO_INDEX(s) ((s) & 0xff)
 #define INDEX_TO_SEQ(i) ((i) & 0xff)
-#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
 #define SEQ_RX_FRAME cpu_to_le16(0x8000)

 /**
|
||||
|
@@ -234,9 +233,7 @@ struct iwl_cmd_header {
  *
  *  0:7   tfd index - position within TX queue
  *  8:12  TX queue id
- *  13    reserved
- *  14    huge - driver sets this to indicate command is in the
- *        'huge' storage at the end of the command buffers
+ *  13:14 reserved
  *  15    unsolicited RX or uCode-originated notification
  */
 	__le16 sequence;
|
||||
|
|
|
@@ -127,16 +127,6 @@ struct iwl_temp_ops {
 struct iwl_lib_ops {
 	/* set hw dependent parameters */
 	int (*set_hw_params)(struct iwl_priv *priv);
-	/* Handling TX */
-	void (*txq_set_sched)(struct iwl_priv *priv, u32 mask);
-	int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
-				     struct iwl_tx_queue *txq,
-				     dma_addr_t addr,
-				     u16 len, u8 reset, u8 pad);
-	void (*txq_free_tfd)(struct iwl_priv *priv,
-			     struct iwl_tx_queue *txq);
-	int (*txq_init)(struct iwl_priv *priv,
-			struct iwl_tx_queue *txq);
 	/* setup Rx handler */
 	void (*rx_handler_setup)(struct iwl_priv *priv);
 	/* setup deferred work */
|
||||
|
|
|
@ -48,8 +48,6 @@
|
|||
#include "iwl-agn-rs.h"
|
||||
#include "iwl-agn-tt.h"
|
||||
|
||||
#define U32_PAD(n) ((4-(n))&0x3)
|
||||
|
||||
struct iwl_tx_queue;
|
||||
|
||||
/* CT-KILL constants */
|
||||
|
@@ -83,7 +81,7 @@ struct iwl_tx_queue;
 #define MAX_RTS_THRESHOLD 2347U
 #define MAX_MSDU_SIZE 2304U
 #define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_BEACON_INTERVAL 200U
 #define DEFAULT_SHORT_RETRY_LIMIT 7U
 #define DEFAULT_LONG_RETRY_LIMIT 4U
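For context on the doubled default above: beacon intervals here are, as elsewhere in mac80211, expressed in 802.11 time units (1 TU = 1024 µs), so the change is roughly 102 ms → 205 ms between beacons. The sketch below is a stand-alone arithmetic check, not driver code.

#include <stdio.h>

/* 802.11 time unit: 1 TU = 1024 microseconds. */
#define TU_TO_USEC(tu) ((tu) * 1024u)

int main(void)
{
	unsigned old_tu = 100, new_tu = 200;

	printf("old default: %u TU = %u us (~%.1f ms)\n",
	       old_tu, TU_TO_USEC(old_tu), TU_TO_USEC(old_tu) / 1000.0);
	printf("new default: %u TU = %u us (~%.1f ms)\n",
	       new_tu, TU_TO_USEC(new_tu), TU_TO_USEC(new_tu) / 1000.0);
	return 0;
}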
|
||||
|
||||
|
@ -112,8 +110,6 @@ struct iwl_cmd_meta {
|
|||
struct iwl_device_cmd *cmd,
|
||||
struct iwl_rx_packet *pkt);
|
||||
|
||||
/* The CMD_SIZE_HUGE flag bit indicates that the command
|
||||
* structure is stored at the end of the shared queue memory. */
|
||||
u32 flags;
|
||||
|
||||
DEFINE_DMA_UNMAP_ADDR(mapping);
|
||||
|
@@ -123,7 +119,23 @@
 /*
  * Generic queue structure
  *
- * Contains common data for Rx and Tx queues
+ * Contains common data for Rx and Tx queues.
+ *
+ * Note the difference between n_bd and n_window: the hardware
+ * always assumes 256 descriptors, so n_bd is always 256 (unless
+ * there might be HW changes in the future). For the normal TX
+ * queues, n_window, which is the size of the software queue data
+ * is also 256; however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, n_bd stays 256. As a result,
+ * the software buffers (in the variables @meta, @txb in struct
+ * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds
+ * in the same struct) have 256.
+ * This means that we end up with the following:
+ * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ * SW entries: | 0 | ... | 31 |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlayed over the HW queue.
  */
 struct iwl_queue {
 	int n_bd;              /* number of BDs in this queue */
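The comment added above describes a 256-entry hardware ring overlaid by a 32-entry software window. The following stand-alone sketch (plain C, not driver code) illustrates the index arithmetic that makes the overlay work; the masking mirrors what get_cmd_index() does for the command queue once the "huge" special case is gone.

#include <stdio.h>

/* Simplified stand-ins for the values described in the comment above:
 * the HW queue always has 256 descriptors (n_bd), while the command
 * queue's software window (n_window) holds only 32 entries. */
#define N_BD     256
#define N_WINDOW  32

/* Map a hardware queue index onto its software window slot, the same
 * power-of-two masking get_cmd_index() performs. */
static int sw_slot(int hw_index)
{
	return hw_index & (N_WINDOW - 1);
}

int main(void)
{
	int hw;

	/* HW entries 0..255 all fall into SW slots 0..31. */
	for (hw = 0; hw < N_BD; hw += 40)
		printf("HW index %3d -> SW slot %2d\n", hw, sw_slot(hw));

	return 0;
}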
@ -165,7 +177,7 @@ struct iwl_tx_info {
|
|||
|
||||
struct iwl_tx_queue {
|
||||
struct iwl_queue q;
|
||||
void *tfds;
|
||||
struct iwl_tfd *tfds;
|
||||
struct iwl_device_cmd **cmd;
|
||||
struct iwl_cmd_meta *meta;
|
||||
struct iwl_tx_info *txb;
|
||||
|
@ -247,7 +259,6 @@ enum {
|
|||
CMD_SYNC = 0,
|
||||
CMD_SIZE_NORMAL = 0,
|
||||
CMD_NO_SKB = 0,
|
||||
CMD_SIZE_HUGE = (1 << 0),
|
||||
CMD_ASYNC = (1 << 1),
|
||||
CMD_WANT_SKB = (1 << 2),
|
||||
CMD_MAPPED = (1 << 3),
|
||||
|
@@ -259,8 +270,8 @@
  * struct iwl_device_cmd
  *
  * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for a scan command
- * (which is relatively huge; space is allocated separately).
+ * size of the largest command we send to uCode, except for commands that
+ * aren't fully copied and use other TFD space.
  */
 struct iwl_device_cmd {
 	struct iwl_cmd_header hdr;	/* uCode API */

@@ -277,15 +288,21 @@ struct iwl_device_cmd {

 #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

+#define IWL_MAX_CMD_TFDS 2
+
+enum iwl_hcmd_dataflag {
+	IWL_HCMD_DFL_NOCOPY = BIT(0),
+};
+
 struct iwl_host_cmd {
-	const void *data;
+	const void *data[IWL_MAX_CMD_TFDS];
 	unsigned long reply_page;
 	void (*callback)(struct iwl_priv *priv,
 			 struct iwl_device_cmd *cmd,
 			 struct iwl_rx_packet *pkt);
 	u32 flags;
-	u16 len;
+	u16 len[IWL_MAX_CMD_TFDS];
+	u8 dataflags[IWL_MAX_CMD_TFDS];
 	u8 id;
 };
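With `data`, `len` and `dataflags` now sized by IWL_MAX_CMD_TFDS, a single host command can carry up to two fragments, each independently flagged IWL_HCMD_DFL_NOCOPY. The sketch below builds such a two-fragment command with simplified stand-in types (the real struct also carries the callback, flags and reply_page fields shown above); the command id parameter is purely illustrative.

#include <stdint.h>
#include <string.h>

#define IWL_MAX_CMD_TFDS    2
#define IWL_HCMD_DFL_NOCOPY (1u << 0)

/* Simplified stand-in for struct iwl_host_cmd; callback/flags/reply_page
 * from the real definition are omitted for brevity. */
struct mock_host_cmd {
	const void *data[IWL_MAX_CMD_TFDS];
	uint16_t    len[IWL_MAX_CMD_TFDS];
	uint8_t     dataflags[IWL_MAX_CMD_TFDS];
	uint8_t     id;
};

/*
 * Build a two-fragment command the way the beacon path in this merge does:
 * fragment 0 holds the small fixed command header, fragment 1 points at the
 * (potentially large) frame, and neither fragment is copied.
 */
static void build_two_frag_cmd(struct mock_host_cmd *cmd, uint8_t cmd_id,
			       const void *hdr, uint16_t hdr_len,
			       const void *frame, uint16_t frame_len)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->id           = cmd_id;
	cmd->data[0]      = hdr;
	cmd->len[0]       = hdr_len;
	cmd->dataflags[0] = IWL_HCMD_DFL_NOCOPY;
	cmd->data[1]      = frame;
	cmd->len[1]       = frame_len;
	cmd->dataflags[1] = IWL_HCMD_DFL_NOCOPY;
}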
|
||||
|
||||
|
@@ -688,17 +705,8 @@ static inline int iwl_queue_used(const struct iwl_queue *q, int i)
 }


-static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
+static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
 {
-	/*
-	 * This is for init calibration result and scan command which
-	 * required buffer > TFD_MAX_PAYLOAD_SIZE,
-	 * the big buffer at end of command array
-	 */
-	if (is_huge)
-		return q->n_window;	/* must be power of 2 */
-
-	/* Otherwise, use normal size buffers */
 	return index & (q->n_window - 1);
 }
|
||||
|
||||
|
@ -1171,6 +1179,14 @@ enum iwl_scan_type {
|
|||
IWL_SCAN_OFFCH_TX,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
struct iwl_testmode_trace {
|
||||
u8 *cpu_addr;
|
||||
u8 *trace_addr;
|
||||
dma_addr_t dma_addr;
|
||||
bool trace_enabled;
|
||||
};
|
||||
#endif
|
||||
struct iwl_priv {
|
||||
|
||||
/* ieee device used by generic ieee processing code */
|
||||
|
@ -1452,6 +1468,7 @@ struct iwl_priv {
|
|||
struct work_struct beacon_update;
|
||||
struct iwl_rxon_context *beacon_ctx;
|
||||
struct sk_buff *beacon_skb;
|
||||
void *beacon_cmd;
|
||||
|
||||
struct work_struct tt_work;
|
||||
struct work_struct ct_enter;
|
||||
|
@ -1501,6 +1518,11 @@ struct iwl_priv {
|
|||
struct led_classdev led;
|
||||
unsigned long blink_on, blink_off;
|
||||
bool led_registered;
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
struct iwl_testmode_trace testmode_trace;
|
||||
#endif
|
||||
u32 dbg_fixed_rate;
|
||||
|
||||
}; /*iwl_priv */
|
||||
|
||||
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
|
||||
|
|
|
@ -137,20 +137,27 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
|
|||
#define TRACE_SYSTEM iwlwifi
|
||||
|
||||
TRACE_EVENT(iwlwifi_dev_hcmd,
|
||||
TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
|
||||
TP_ARGS(priv, hcmd, len, flags),
|
||||
TP_PROTO(struct iwl_priv *priv, u32 flags,
|
||||
const void *hcmd0, size_t len0,
|
||||
const void *hcmd1, size_t len1,
|
||||
const void *hcmd2, size_t len2),
|
||||
TP_ARGS(priv, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2),
|
||||
TP_STRUCT__entry(
|
||||
PRIV_ENTRY
|
||||
__dynamic_array(u8, hcmd, len)
|
||||
__dynamic_array(u8, hcmd0, len0)
|
||||
__dynamic_array(u8, hcmd1, len1)
|
||||
__dynamic_array(u8, hcmd2, len2)
|
||||
__field(u32, flags)
|
||||
),
|
||||
TP_fast_assign(
|
||||
PRIV_ASSIGN;
|
||||
memcpy(__get_dynamic_array(hcmd), hcmd, len);
|
||||
memcpy(__get_dynamic_array(hcmd0), hcmd0, len0);
|
||||
memcpy(__get_dynamic_array(hcmd1), hcmd1, len1);
|
||||
memcpy(__get_dynamic_array(hcmd2), hcmd2, len2);
|
||||
__entry->flags = flags;
|
||||
),
|
||||
TP_printk("[%p] hcmd %#.2x (%ssync)",
|
||||
__entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
|
||||
__entry->priv, ((u8 *)__get_dynamic_array(hcmd0))[0],
|
||||
__entry->flags & CMD_ASYNC ? "a" : "")
|
||||
);
|
||||
|
||||
|
@ -202,15 +209,18 @@ TRACE_EVENT(iwlwifi_dev_tx,
|
|||
);
|
||||
|
||||
TRACE_EVENT(iwlwifi_dev_ucode_error,
|
||||
TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
|
||||
TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low,
|
||||
u32 data1, u32 data2, u32 line, u32 blink1,
|
||||
u32 blink2, u32 ilink1, u32 ilink2),
|
||||
TP_ARGS(priv, desc, time, data1, data2, line,
|
||||
blink1, blink2, ilink1, ilink2),
|
||||
u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
|
||||
u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
|
||||
u32 brd_ver),
|
||||
TP_ARGS(priv, desc, tsf_low, data1, data2, line,
|
||||
blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2,
|
||||
gp3, ucode_ver, hw_ver, brd_ver),
|
||||
TP_STRUCT__entry(
|
||||
PRIV_ENTRY
|
||||
__field(u32, desc)
|
||||
__field(u32, time)
|
||||
__field(u32, tsf_low)
|
||||
__field(u32, data1)
|
||||
__field(u32, data2)
|
||||
__field(u32, line)
|
||||
|
@ -218,11 +228,18 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
|
|||
__field(u32, blink2)
|
||||
__field(u32, ilink1)
|
||||
__field(u32, ilink2)
|
||||
__field(u32, bcon_time)
|
||||
__field(u32, gp1)
|
||||
__field(u32, gp2)
|
||||
__field(u32, gp3)
|
||||
__field(u32, ucode_ver)
|
||||
__field(u32, hw_ver)
|
||||
__field(u32, brd_ver)
|
||||
),
|
||||
TP_fast_assign(
|
||||
PRIV_ASSIGN;
|
||||
__entry->desc = desc;
|
||||
__entry->time = time;
|
||||
__entry->tsf_low = tsf_low;
|
||||
__entry->data1 = data1;
|
||||
__entry->data2 = data2;
|
||||
__entry->line = line;
|
||||
|
@ -230,12 +247,25 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
|
|||
__entry->blink2 = blink2;
|
||||
__entry->ilink1 = ilink1;
|
||||
__entry->ilink2 = ilink2;
|
||||
__entry->bcon_time = bcon_time;
|
||||
__entry->gp1 = gp1;
|
||||
__entry->gp2 = gp2;
|
||||
__entry->gp3 = gp3;
|
||||
__entry->ucode_ver = ucode_ver;
|
||||
__entry->hw_ver = hw_ver;
|
||||
__entry->brd_ver = brd_ver;
|
||||
),
|
||||
TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
|
||||
"blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
|
||||
__entry->priv, __entry->desc, __entry->time, __entry->data1,
|
||||
"blink 0x%05X 0x%05X ilink 0x%05X 0x%05X "
|
||||
"bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X "
|
||||
"hw 0x%08X brd 0x%08X",
|
||||
__entry->priv, __entry->desc, __entry->tsf_low,
|
||||
__entry->data1,
|
||||
__entry->data2, __entry->line, __entry->blink1,
|
||||
__entry->blink2, __entry->ilink1, __entry->ilink2)
|
||||
__entry->blink2, __entry->ilink1, __entry->ilink2,
|
||||
__entry->bcon_time, __entry->gp1, __entry->gp2,
|
||||
__entry->gp3, __entry->ucode_ver, __entry->hw_ver,
|
||||
__entry->brd_ver)
|
||||
);
|
||||
|
||||
TRACE_EVENT(iwlwifi_dev_ucode_event,
|
||||
|
|
|
@ -216,9 +216,8 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
|
|||
|
||||
static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
|
||||
{
|
||||
u32 otpgp;
|
||||
iwl_read32(priv, CSR_OTP_GP_REG);
|
||||
|
||||
otpgp = iwl_read32(priv, CSR_OTP_GP_REG);
|
||||
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
|
||||
iwl_clear_bit(priv, CSR_OTP_GP_REG,
|
||||
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
|
||||
|
|
|
@ -188,6 +188,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
|
|||
cmd_idx = iwl_enqueue_hcmd(priv, cmd);
|
||||
if (cmd_idx < 0) {
|
||||
ret = cmd_idx;
|
||||
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
|
||||
IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
|
||||
get_cmd_string(cmd->id), ret);
|
||||
return ret;
|
||||
|
@@ -264,8 +265,8 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
 {
 	struct iwl_host_cmd cmd = {
 		.id = id,
-		.len = len,
-		.data = data,
+		.len = { len, },
+		.data = { data, },
 	};

 	return iwl_send_cmd_sync(priv, &cmd);

@@ -279,8 +280,8 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
 {
 	struct iwl_host_cmd cmd = {
 		.id = id,
-		.len = len,
-		.data = data,
+		.len = { len, },
+		.data = { data, },
 	};

 	cmd.flags |= CMD_ASYNC;
|
||||
|
|
|
@ -107,8 +107,8 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
|
|||
{
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = REPLY_LEDS_CMD,
|
||||
.len = sizeof(struct iwl_led_cmd),
|
||||
.data = led_cmd,
|
||||
.len = { sizeof(struct iwl_led_cmd), },
|
||||
.data = { led_cmd, },
|
||||
.flags = CMD_ASYNC,
|
||||
.callback = NULL,
|
||||
};
|
||||
|
|
|
@ -141,7 +141,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
|
|||
struct iwl_host_cmd cmd = {
|
||||
.id = REPLY_ADD_STA,
|
||||
.flags = flags,
|
||||
.data = data,
|
||||
.data = { data, },
|
||||
};
|
||||
u8 sta_id __maybe_unused = sta->sta.sta_id;
|
||||
|
||||
|
@ -155,7 +155,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
|
|||
might_sleep();
|
||||
}
|
||||
|
||||
cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
|
||||
cmd.len[0] = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
|
||||
ret = iwl_send_cmd(priv, &cmd);
|
||||
|
||||
if (ret || (flags & CMD_ASYNC))
|
||||
|
@ -401,9 +401,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
|
|||
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = REPLY_REMOVE_STA,
|
||||
.len = sizeof(struct iwl_rem_sta_cmd),
|
||||
.len = { sizeof(struct iwl_rem_sta_cmd), },
|
||||
.flags = CMD_SYNC,
|
||||
.data = &rm_sta_cmd,
|
||||
.data = { &rm_sta_cmd, },
|
||||
};
|
||||
|
||||
memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
|
||||
|
@ -760,9 +760,9 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
|
|||
|
||||
struct iwl_host_cmd cmd = {
|
||||
.id = REPLY_TX_LINK_QUALITY_CMD,
|
||||
.len = sizeof(struct iwl_link_quality_cmd),
|
||||
.len = { sizeof(struct iwl_link_quality_cmd), },
|
||||
.flags = flags,
|
||||
.data = lq,
|
||||
.data = { lq, },
|
||||
};
|
||||
|
||||
if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
|
||||
|
|
|
@ -97,6 +97,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
|
|||
|
||||
[IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
|
||||
[IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
|
||||
|
||||
[IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
|
||||
|
||||
[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
|
||||
[IWL_TM_ATTR_TRACE_DATA] = { .type = NLA_UNSPEC, },
|
||||
|
||||
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -167,6 +174,31 @@ static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
|
|||
void iwl_testmode_init(struct iwl_priv *priv)
|
||||
{
|
||||
priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
|
||||
priv->testmode_trace.trace_enabled = false;
|
||||
}
|
||||
|
||||
static void iwl_trace_cleanup(struct iwl_priv *priv)
|
||||
{
|
||||
struct device *dev = &priv->pci_dev->dev;
|
||||
|
||||
if (priv->testmode_trace.trace_enabled) {
|
||||
if (priv->testmode_trace.cpu_addr &&
|
||||
priv->testmode_trace.dma_addr)
|
||||
dma_free_coherent(dev,
|
||||
TRACE_TOTAL_SIZE,
|
||||
priv->testmode_trace.cpu_addr,
|
||||
priv->testmode_trace.dma_addr);
|
||||
priv->testmode_trace.trace_enabled = false;
|
||||
priv->testmode_trace.cpu_addr = NULL;
|
||||
priv->testmode_trace.trace_addr = NULL;
|
||||
priv->testmode_trace.dma_addr = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void iwl_testmode_cleanup(struct iwl_priv *priv)
|
||||
{
|
||||
iwl_trace_cleanup(priv);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -198,10 +230,11 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
}
|
||||
|
||||
cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
|
||||
cmd.data = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
|
||||
cmd.len = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
|
||||
cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
|
||||
cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
|
||||
cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
|
||||
IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
|
||||
" len %d\n", cmd.id, cmd.flags, cmd.len);
|
||||
" len %d\n", cmd.id, cmd.flags, cmd.len[0]);
|
||||
/* ok, let's submit the command to ucode */
|
||||
return iwl_send_cmd(priv, &cmd);
|
||||
}
|
||||
|
@ -388,6 +421,38 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
"Error starting the device: %d\n", status);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
|
||||
if (priv->eeprom) {
|
||||
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
|
||||
priv->cfg->base_params->eeprom_size + 20);
|
||||
if (!skb) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Error allocating memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
|
||||
IWL_TM_CMD_DEV2APP_EEPROM_RSP);
|
||||
NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
|
||||
priv->cfg->base_params->eeprom_size,
|
||||
priv->eeprom);
|
||||
status = cfg80211_testmode_reply(skb);
|
||||
if (status < 0)
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Error sending msg : %d\n",
|
||||
status);
|
||||
} else
|
||||
return -EFAULT;
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
|
||||
if (!tb[IWL_TM_ATTR_FIXRATE]) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Error finding fixrate setting\n");
|
||||
return -ENOMSG;
|
||||
}
|
||||
priv->dbg_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
|
||||
break;
|
||||
|
||||
default:
|
||||
IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
|
||||
return -ENOSYS;
|
||||
|
@ -399,6 +464,102 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
|
|||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* This function handles the user application commands for uCode trace
|
||||
*
|
||||
* It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
|
||||
* handlers respectively.
|
||||
*
|
||||
* If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
|
||||
* value of the actual command execution is replied to the user application.
|
||||
*
|
||||
* @hw: ieee80211_hw object that represents the device
|
||||
* @tb: gnl message fields from the user space
|
||||
*/
|
||||
static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
|
||||
{
|
||||
struct iwl_priv *priv = hw->priv;
|
||||
struct sk_buff *skb;
|
||||
int status = 0;
|
||||
struct device *dev = &priv->pci_dev->dev;
|
||||
|
||||
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
|
||||
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
|
||||
if (priv->testmode_trace.trace_enabled)
|
||||
return -EBUSY;
|
||||
|
||||
priv->testmode_trace.cpu_addr =
|
||||
dma_alloc_coherent(dev,
|
||||
TRACE_TOTAL_SIZE,
|
||||
&priv->testmode_trace.dma_addr,
|
||||
GFP_KERNEL);
|
||||
if (!priv->testmode_trace.cpu_addr)
|
||||
return -ENOMEM;
|
||||
priv->testmode_trace.trace_enabled = true;
|
||||
priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
|
||||
priv->testmode_trace.cpu_addr, 0x100);
|
||||
memset(priv->testmode_trace.trace_addr, 0x03B,
|
||||
TRACE_BUFF_SIZE);
|
||||
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
|
||||
sizeof(priv->testmode_trace.dma_addr) + 20);
|
||||
if (!skb) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Error allocating memory\n");
|
||||
iwl_trace_cleanup(priv);
|
||||
return -ENOMEM;
|
||||
}
|
||||
NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR,
|
||||
sizeof(priv->testmode_trace.dma_addr),
|
||||
(u64 *)&priv->testmode_trace.dma_addr);
|
||||
status = cfg80211_testmode_reply(skb);
|
||||
if (status < 0) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Error sending msg : %d\n",
|
||||
status);
|
||||
}
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_END_TRACE:
|
||||
iwl_trace_cleanup(priv);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_READ_TRACE:
|
||||
if (priv->testmode_trace.trace_enabled &&
|
||||
priv->testmode_trace.trace_addr) {
|
||||
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
|
||||
20 + TRACE_BUFF_SIZE);
|
||||
if (skb == NULL) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Error allocating memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
NLA_PUT(skb, IWL_TM_ATTR_TRACE_DATA,
|
||||
TRACE_BUFF_SIZE,
|
||||
priv->testmode_trace.trace_addr);
|
||||
status = cfg80211_testmode_reply(skb);
|
||||
if (status < 0) {
|
||||
IWL_DEBUG_INFO(priv,
|
||||
"Error sending msg : %d\n", status);
|
||||
}
|
||||
} else
|
||||
return -EFAULT;
|
||||
break;
|
||||
|
||||
default:
|
||||
IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n");
|
||||
return -ENOSYS;
|
||||
}
|
||||
return status;
|
||||
|
||||
nla_put_failure:
|
||||
kfree_skb(skb);
|
||||
if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
|
||||
IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
|
||||
iwl_trace_cleanup(priv);
|
||||
return -EMSGSIZE;
|
||||
}
|
||||
|
||||
/* The testmode gnl message handler that takes the gnl message from the
|
||||
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
|
||||
* invoke the corresponding handlers.
|
||||
|
@ -455,9 +616,19 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
|
|||
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
|
||||
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
|
||||
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
|
||||
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
|
||||
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
|
||||
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
|
||||
result = iwl_testmode_driver(hw, tb);
|
||||
break;
|
||||
|
||||
case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
|
||||
case IWL_TM_CMD_APP2DEV_END_TRACE:
|
||||
case IWL_TM_CMD_APP2DEV_READ_TRACE:
|
||||
IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
|
||||
result = iwl_testmode_trace(hw, tb);
|
||||
break;
|
||||
|
||||
default:
|
||||
IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
|
||||
result = -ENOSYS;
|
||||
|
|
|
@ -88,9 +88,15 @@ enum iwl_tm_cmd_t {
|
|||
IWL_TM_CMD_APP2DEV_LOAD_INIT_FW,
|
||||
IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB,
|
||||
IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW,
|
||||
IWL_TM_CMD_APP2DEV_GET_EEPROM,
|
||||
IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
|
||||
/* if there is other new command for the driver layer operation,
|
||||
* append them here */
|
||||
|
||||
/* commands fom user space for uCode trace operations */
|
||||
IWL_TM_CMD_APP2DEV_BEGIN_TRACE,
|
||||
IWL_TM_CMD_APP2DEV_END_TRACE,
|
||||
IWL_TM_CMD_APP2DEV_READ_TRACE,
|
||||
|
||||
/* commands from kernel space to carry the synchronous response
|
||||
* to user application */
|
||||
|
@ -99,6 +105,11 @@ enum iwl_tm_cmd_t {
|
|||
/* commands from kernel space to multicast the spontaneous messages
|
||||
* to user application */
|
||||
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT,
|
||||
|
||||
/* commands from kernel space to carry the eeprom response
|
||||
* to user application */
|
||||
IWL_TM_CMD_DEV2APP_EEPROM_RSP,
|
||||
|
||||
IWL_TM_CMD_MAX,
|
||||
};
|
||||
|
||||
|
@ -144,8 +155,31 @@ enum iwl_tm_attr_t {
|
|||
* application */
|
||||
IWL_TM_ATTR_UCODE_RX_PKT,
|
||||
|
||||
/* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_EEPROM for the data content responging to the user
|
||||
* application */
|
||||
IWL_TM_ATTR_EEPROM,
|
||||
|
||||
/* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address
|
||||
*/
|
||||
IWL_TM_ATTR_TRACE_ADDR,
|
||||
IWL_TM_ATTR_TRACE_DATA,
|
||||
|
||||
/* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ,
|
||||
* The mandatory fields are:
|
||||
* IWL_TM_ATTR_FIXRATE for the fixed rate
|
||||
*/
|
||||
IWL_TM_ATTR_FIXRATE,
|
||||
|
||||
IWL_TM_ATTR_MAX,
|
||||
};
|
||||
|
||||
/* uCode trace buffer */
|
||||
#define TRACE_BUFF_SIZE 0x20000
|
||||
#define TRACE_BUFF_PADD 0x2000
|
||||
#define TRACE_TOTAL_SIZE (TRACE_BUFF_SIZE + TRACE_BUFF_PADD)
|
||||
|
||||
#endif
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
#include <linux/slab.h>
|
||||
#include <net/mac80211.h>
|
||||
#include "iwl-eeprom.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-dev.h"
|
||||
#include "iwl-core.h"
|
||||
#include "iwl-sta.h"
|
||||
|

@@ -85,6 +86,158 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
txq->need_update = 0;
}

static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];

dma_addr_t addr = get_unaligned_le32(&tb->lo);
if (sizeof(dma_addr_t) > sizeof(u32))
addr |=
((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

return addr;
}

static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];

return le16_to_cpu(tb->hi_n_len) >> 4;
}

static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
dma_addr_t addr, u16 len)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
u16 hi_n_len = len << 4;

put_unaligned_le32(addr, &tb->lo);
if (sizeof(dma_addr_t) > sizeof(u32))
hi_n_len |= ((addr >> 16) >> 16) & 0xF;

tb->hi_n_len = cpu_to_le16(hi_n_len);

tfd->num_tbs = idx + 1;
}

static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
{
return tfd->num_tbs & 0x1f;
}
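
/*
 * Worked example of the packing done by the helpers above (illustrative,
 * not part of the original source): for a 36-bit DMA address
 * addr = 0x987654321 and len = 0x40,
 *   tb->lo       = lower 32 bits of addr      = 0x87654321 (little endian)
 *   tb->hi_n_len = (0x40 << 4) | (addr >> 32) = 0x0409
 * i.e. bits 35:32 of the address sit in the low nibble of hi_n_len and the
 * 12-bit length in its upper bits; the _get_addr()/_get_len() helpers undo
 * exactly this split.
 */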

static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
struct iwl_tfd *tfd)
{
struct pci_dev *dev = priv->pci_dev;
int i;
int num_tbs;

/* Sanity check on number of chunks */
num_tbs = iwl_tfd_get_num_tbs(tfd);

if (num_tbs >= IWL_NUM_OF_TBS) {
IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
/* @todo issue fatal error, it is quite serious situation */
return;
}

/* Unmap tx_cmd */
if (num_tbs)
pci_unmap_single(dev,
dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len),
PCI_DMA_BIDIRECTIONAL);

/* Unmap chunks, if any. */
for (i = 1; i < num_tbs; i++)
pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i),
iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE);
}

/**
 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
 * @priv - driver private data
 * @txq - tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
int index = txq->q.read_ptr;

iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]);

/* free SKB */
if (txq->txb) {
struct sk_buff *skb;

skb = txq->txb[txq->q.read_ptr].skb;

/* can be called from irqs-disabled context */
if (skb) {
dev_kfree_skb_any(skb);
txq->txb[txq->q.read_ptr].skb = NULL;
}
}
}

int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len,
u8 reset)
{
struct iwl_queue *q;
struct iwl_tfd *tfd, *tfd_tmp;
u32 num_tbs;

q = &txq->q;
tfd_tmp = txq->tfds;
tfd = &tfd_tmp[q->write_ptr];

if (reset)
memset(tfd, 0, sizeof(*tfd));

num_tbs = iwl_tfd_get_num_tbs(tfd);

/* Each TFD can point to a maximum 20 Tx buffers */
if (num_tbs >= IWL_NUM_OF_TBS) {
IWL_ERR(priv, "Error can not send more than %d chunks\n",
IWL_NUM_OF_TBS);
return -EINVAL;
}

if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
return -EINVAL;

if (unlikely(addr & ~IWL_TX_DMA_MASK))
IWL_ERR(priv, "Unaligned address = %llx\n",
(unsigned long long)addr);

iwl_tfd_set_tb(tfd, num_tbs, addr, len);

return 0;
}

/*
 * Tell nic where to find circular buffer of Tx Frame Descriptors for
 * given Tx queue, and enable the DMA channel used for that queue.
 *
 * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
 * channels supported in hardware.
 */
static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
int txq_id = txq->q.id;

/* Circular buffer (TFD queue in DRAM) physical base address */
iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
txq->q.dma_addr >> 8);

return 0;
}
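
/*
 * Note (illustrative): the write above stores the TFD ring base in
 * 256-byte units (dma_addr >> 8), which implies the circular buffer
 * allocation is expected to be at least 256-byte aligned.
 */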

/**
 * iwl_tx_queue_unmap -  Unmap any remaining DMA mappings and free skb's
 */

@@ -97,7 +250,7 @@ void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
return;

while (q->write_ptr != q->read_ptr) {
priv->cfg->ops->lib->txq_free_tfd(priv, txq);
iwlagn_txq_free_tfd(priv, txq);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
}

@@ -154,7 +307,7 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)
return;

while (q->read_ptr != q->write_ptr) {
i = get_cmd_index(q, q->read_ptr, 0);
i = get_cmd_index(q, q->read_ptr);

if (txq->meta[i].flags & CMD_MAPPED) {
pci_unmap_single(priv->pci_dev,

@@ -166,15 +319,6 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv)

q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}

i = q->n_window;
if (txq->meta[i].flags & CMD_MAPPED) {
pci_unmap_single(priv->pci_dev,
dma_unmap_addr(&txq->meta[i], mapping),
dma_unmap_len(&txq->meta[i], len),
PCI_DMA_BIDIRECTIONAL);
txq->meta[i].flags = 0;
}
}

/**

@@ -194,7 +338,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
iwl_cmd_queue_unmap(priv);

/* De-alloc array of command/tx buffers */
for (i = 0; i <= TFD_CMD_SLOTS; i++)
for (i = 0; i < TFD_CMD_SLOTS; i++)
kfree(txq->cmd[i]);

/* De-alloc circular buffer of TFDs */

@@ -334,33 +478,17 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
{
int i, len;
int ret;
int actual_slots = slots_num;

/*
 * Alloc buffer array for commands (Tx or other types of commands).
 * For the command queue (#4/#9), allocate command space + one big
 * command for scan, since scan command is very huge; the system will
 * not have two scans at the same time, so only one is needed.
 * For normal Tx queues (all other queues), no super-size command
 * space is needed.
 */
if (txq_id == priv->cmd_queue)
actual_slots++;

txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num,
GFP_KERNEL);
txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num,
GFP_KERNEL);

if (!txq->meta || !txq->cmd)
goto out_free_arrays;

len = sizeof(struct iwl_device_cmd);
for (i = 0; i < actual_slots; i++) {
/* only happens for cmd queue */
if (i == slots_num)
len = IWL_MAX_CMD_SIZE;

for (i = 0; i < slots_num; i++) {
txq->cmd[i] = kmalloc(len, GFP_KERNEL);
if (!txq->cmd[i])
goto err;

@@ -391,11 +519,11 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
return ret;

/* Tell device where to find queue */
priv->cfg->ops->lib->txq_init(priv, txq);
iwlagn_tx_queue_init(priv, txq);

return 0;
err:
for (i = 0; i < actual_slots; i++)
for (i = 0; i < slots_num; i++)
kfree(txq->cmd[i]);
out_free_arrays:
kfree(txq->meta);

@@ -420,7 +548,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);

/* Tell device where to find queue */
priv->cfg->ops->lib->txq_init(priv, txq);
iwlagn_tx_queue_init(priv, txq);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

@@ -443,23 +571,49 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
dma_addr_t phys_addr;
unsigned long flags;
u32 idx;
u16 fix_size;
u16 copy_size, cmd_size;
bool is_ct_kill = false;
bool had_nocopy = false;
int i;
u8 *cmd_dest;
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {};
int trace_lens[IWL_MAX_CMD_TFDS + 1] = {};
int trace_idx;
#endif

fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
if (test_bit(STATUS_FW_ERROR, &priv->status)) {
IWL_WARN(priv, "fw recovery, no hcmd send\n");
return -EIO;
}

copy_size = sizeof(out_cmd->hdr);
cmd_size = sizeof(out_cmd->hdr);

/* need one for the header if the first is NOCOPY */
BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1);

for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
had_nocopy = true;
} else {
/* NOCOPY must not be followed by normal! */
if (WARN_ON(had_nocopy))
return -EINVAL;
copy_size += cmd->len[i];
}
cmd_size += cmd->len[i];
}
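
/*
 * Worked example (illustrative, not part of the original source): for a
 * command built from two fragments, len = { 16, 2000 }, with the second
 * fragment marked IWL_HCMD_DFL_NOCOPY, the loop above yields
 *   copy_size = sizeof(out_cmd->hdr) + 16          (copied into the cmd buffer)
 *   cmd_size  = sizeof(out_cmd->hdr) + 16 + 2000   (total reported in debug output)
 * The NOCOPY fragment is DMA-mapped in place and attached as its own TFD
 * entry further down instead of being memcpy'd.
 */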

/*
 * If any of the command structures end up being larger than
 * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
 * we will need to increase the size of the TFD entries
 * Also, check to see if command buffer should not exceed the size
 * of device_cmd and max_cmd_size.
 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
 * allocated into separate TFDs, then we will need to
 * increase the size of the buffers.
 */
if (WARN_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
!(cmd->flags & CMD_SIZE_HUGE)))
return -EINVAL;

if (WARN_ON(fix_size > IWL_MAX_CMD_SIZE))
if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
return -EINVAL;

if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {

@@ -468,14 +622,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return -EIO;
}

/*
 * As we only have a single huge buffer, check that the command
 * is synchronous (otherwise buffers could end up being reused).
 */

if (WARN_ON((cmd->flags & CMD_ASYNC) && (cmd->flags & CMD_SIZE_HUGE)))
return -EINVAL;

spin_lock_irqsave(&priv->hcmd_lock, flags);

if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {

@@ -490,7 +636,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return -ENOSPC;
}

idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
idx = get_cmd_index(q, q->write_ptr);
out_cmd = txq->cmd[idx];
out_meta = &txq->meta[idx];

@@ -505,57 +651,84 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
if (cmd->flags & CMD_ASYNC)
out_meta->callback = cmd->callback;

/* set up the header */

out_cmd->hdr.cmd = cmd->id;
memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);

/* At this point, the out_cmd now has all of the incoming cmd
 * information */

out_cmd->hdr.flags = 0;
out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
INDEX_TO_SEQ(q->write_ptr));
if (cmd->flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;

#ifdef CONFIG_IWLWIFI_DEBUG
switch (out_cmd->hdr.cmd) {
case REPLY_TX_LINK_QUALITY_CMD:
case SENSITIVITY_CMD:
IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, "
"%d bytes at %d[%d]:%d\n",
get_cmd_string(out_cmd->hdr.cmd),
out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence), fix_size,
q->write_ptr, idx, priv->cmd_queue);
/* and copy the data that needs to be copied */

cmd_dest = &out_cmd->cmd.payload[0];
for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)
break;
default:
memcpy(cmd_dest, cmd->data[i], cmd->len[i]);
cmd_dest += cmd->len[i];
}

IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
"%d bytes at %d[%d]:%d\n",
get_cmd_string(out_cmd->hdr.cmd),
out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence), fix_size,
le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
q->write_ptr, idx, priv->cmd_queue);
}
#endif

phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
fix_size, PCI_DMA_BIDIRECTIONAL);
copy_size, PCI_DMA_BIDIRECTIONAL);
if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) {
idx = -ENOMEM;
goto out;
}

dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, fix_size);
dma_unmap_len_set(out_meta, len, copy_size);

iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
trace_bufs[0] = &out_cmd->hdr;
trace_lens[0] = copy_size;
trace_idx = 1;
#endif

for (i = 0; i < IWL_MAX_CMD_TFDS; i++) {
if (!cmd->len[i])
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i],
cmd->len[i], PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) {
iwlagn_unmap_tfd(priv, out_meta,
&txq->tfds[q->write_ptr]);
idx = -ENOMEM;
goto out;
}

iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
cmd->len[i], 0);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
trace_bufs[trace_idx] = cmd->data[i];
trace_lens[trace_idx] = cmd->len[i];
trace_idx++;
#endif
}

out_meta->flags = cmd->flags | CMD_MAPPED;

txq->need_update = 1;

trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);

priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, fix_size, 1,
U32_PAD(cmd->len));
/* check that tracing gets all possible blocks */
BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
trace_iwlwifi_dev_hcmd(priv, cmd->flags,
trace_bufs[0], trace_lens[0],
trace_bufs[1], trace_lens[1],
trace_bufs[2], trace_lens[2]);
#endif

/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
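
To illustrate the multi-fragment host-command interface this hunk switches to, here is a rough usage sketch. It is hypothetical (the command ID, buffer names and lengths are invented for illustration); the point is that fragment 0 is copied next to the header while a fragment flagged IWL_HCMD_DFL_NOCOPY is DMA-mapped in place and chained as an extra TFD entry:

struct iwl_host_cmd hcmd = {
	.id = REPLY_SCAN_CMD,
	.len = { sizeof(*scan), scan_payload_len, },
	.data = { scan, scan_payload, },
	.dataflags = { 0, IWL_HCMD_DFL_NOCOPY, },
	.flags = CMD_SYNC,
};

ret = iwl_send_cmd_sync(priv, &hcmd);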

@@ -614,7 +787,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
int txq_id = SEQ_TO_QUEUE(sequence);
int index = SEQ_TO_INDEX(sequence);
int cmd_index;
bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];

@@ -632,14 +804,11 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
return;
}

cmd_index = get_cmd_index(&txq->q, index, huge);
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];

pci_unmap_single(priv->pci_dev,
dma_unmap_addr(meta, mapping),
dma_unmap_len(meta, len),
PCI_DMA_BIDIRECTIONAL);
iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]);

/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {