Merge remote branch 'wireless-next/master' into ath6kl-next

Kalle Valo 2011-11-21 17:02:46 +02:00
commit 9a97af7eb6
103 changed files with 3844 additions and 3426 deletions

View File

@ -25,6 +25,7 @@ config ATH9K
config ATH9K_PCI
bool "Atheros ath9k PCI/PCIe bus support"
default y
depends on ATH9K && PCI
---help---
This option enables the PCI bus support in ath9k.

View File

@ -121,10 +121,8 @@ static const struct ar9300_eeprom ar9300_default = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {1, 1, 1},/* 3 chain */
.db_stage2 = {1, 1, 1}, /* 3 chain */
.db_stage3 = {0, 0, 0},
.db_stage4 = {0, 0, 0},
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -144,7 +142,7 @@ static const struct ar9300_eeprom ar9300_default = {
},
.base_ext1 = {
.ant_div_control = 0,
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@ -323,10 +321,8 @@ static const struct ar9300_eeprom ar9300_default = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {3, 3, 3}, /* 3 chain */
.db_stage2 = {3, 3, 3}, /* 3 chain */
.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
.db_stage4 = {3, 3, 3}, /* don't exist for 2G */
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -698,10 +694,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {1, 1, 1},/* 3 chain */
.db_stage2 = {1, 1, 1}, /* 3 chain */
.db_stage3 = {0, 0, 0},
.db_stage4 = {0, 0, 0},
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -721,7 +715,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
},
.base_ext1 = {
.ant_div_control = 0,
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@ -900,10 +894,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
.spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {3, 3, 3}, /* 3 chain */
.db_stage2 = {3, 3, 3}, /* 3 chain */
.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
.db_stage4 = {3, 3, 3}, /* don't exist for 2G */
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0xf,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -1276,10 +1268,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {1, 1, 1},/* 3 chain */
.db_stage2 = {1, 1, 1}, /* 3 chain */
.db_stage3 = {0, 0, 0},
.db_stage4 = {0, 0, 0},
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -1291,20 +1281,20 @@ static const struct ar9300_eeprom ar9300_h112 = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x80c080),
.papdRateMaskHt40 = LE32(0x80c080),
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
.futureModal = {
0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
.ant_div_control = 0,
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
FREQ2FBIN(2472, 1),
FREQ2FBIN(2462, 1),
},
/* ar9300_cal_data_per_freq_op_loop 2g */
.calPierData2G = {
@ -1314,7 +1304,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
},
.calTarget_freqbin_Cck = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2484, 1),
FREQ2FBIN(2472, 1),
},
.calTarget_freqbin_2G = {
FREQ2FBIN(2412, 1),
@ -1478,10 +1468,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {3, 3, 3}, /* 3 chain */
.db_stage2 = {3, 3, 3}, /* 3 chain */
.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
.db_stage4 = {3, 3, 3}, /* don't exist for 2G */
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -1515,7 +1503,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
FREQ2FBIN(5500, 0),
FREQ2FBIN(5600, 0),
FREQ2FBIN(5700, 0),
FREQ2FBIN(5825, 0)
FREQ2FBIN(5785, 0)
},
.calPierData5G = {
{
@ -1854,10 +1842,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {1, 1, 1},/* 3 chain */
.db_stage2 = {1, 1, 1}, /* 3 chain */
.db_stage3 = {0, 0, 0},
.db_stage4 = {0, 0, 0},
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -1877,7 +1863,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
},
.base_ext1 = {
.ant_div_control = 0,
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@ -2056,10 +2042,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshch check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {3, 3, 3}, /* 3 chain */
.db_stage2 = {3, 3, 3}, /* 3 chain */
.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
.db_stage4 = {3, 3, 3}, /* don't exist for 2G */
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -2431,10 +2415,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {1, 1, 1},/* 3 chain */
.db_stage2 = {1, 1, 1}, /* 3 chain */
.db_stage3 = {0, 0, 0},
.db_stage4 = {0, 0, 0},
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -2454,12 +2436,12 @@ static const struct ar9300_eeprom ar9300_h116 = {
},
.base_ext1 = {
.ant_div_control = 0,
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
FREQ2FBIN(2472, 1),
FREQ2FBIN(2462, 1),
},
/* ar9300_cal_data_per_freq_op_loop 2g */
.calPierData2G = {
@ -2633,10 +2615,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
.ob = {3, 3, 3}, /* 3 chain */
.db_stage2 = {3, 3, 3}, /* 3 chain */
.db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
.db_stage4 = {3, 3, 3}, /* don't exist for 2G */
.reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
.quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@ -2663,7 +2643,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.xatten1MarginHigh = {0, 0, 0}
},
.calFreqPier5G = {
FREQ2FBIN(5180, 0),
FREQ2FBIN(5160, 0),
FREQ2FBIN(5220, 0),
FREQ2FBIN(5320, 0),
FREQ2FBIN(5400, 0),
@ -3023,6 +3003,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
return eep->modalHeader2G.antennaGain;
case EEP_QUICK_DROP:
return pBase->miscConfiguration & BIT(1);
default:
return 0;
}
@ -3428,25 +3410,14 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
PR_EEP("Quick Drop", modal_hdr->quick_drop);
PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
PR_EEP("txClip", modal_hdr->txClip);
PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
PR_EEP("Chain0 ob", modal_hdr->ob[0]);
PR_EEP("Chain1 ob", modal_hdr->ob[1]);
PR_EEP("Chain2 ob", modal_hdr->ob[2]);
PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]);
PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]);
PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]);
PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]);
PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]);
PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]);
PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]);
PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]);
PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]);
return len;
}
@ -3503,6 +3474,7 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
@ -3965,6 +3937,40 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
}
}
static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
s32 t[3], f[3] = {5180, 5500, 5785};
if (!quick_drop)
return;
if (freq < 4000)
quick_drop = eep->modalHeader2G.quick_drop;
else {
t[0] = eep->base_ext1.quick_drop_low;
t[1] = eep->modalHeader5G.quick_drop;
t[2] = eep->base_ext1.quick_drop_high;
quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
}
REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
}
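For context, outside this diff: on 5 GHz channels the quick-drop value above is interpolated across three calibration points (5180, 5500 and 5785 MHz) taken from base_ext1 and the 5 GHz modal header. A minimal userspace sketch of that kind of piecewise-linear interpolation, assuming ar9003_hw_power_interpolate() behaves roughly this way (the real helper may round and extrapolate differently at the edges):

#include <stdio.h>

/* Interpolate y(freq) from n (x, y) calibration points sorted by x.
 * Out-of-range inputs are clamped to the nearest point in this sketch.
 */
static int interp(int freq, const int *x, const int *y, int n)
{
	int i;

	if (freq <= x[0])
		return y[0];
	if (freq >= x[n - 1])
		return y[n - 1];
	for (i = 0; i < n - 1; i++)
		if (freq <= x[i + 1])
			return y[i] + (freq - x[i]) * (y[i + 1] - y[i]) /
				      (x[i + 1] - x[i]);
	return y[n - 1];
}

int main(void)
{
	int f[3] = { 5180, 5500, 5785 };	/* calibration piers */
	int t[3] = { -10, -20, -30 };		/* made-up quick_drop values */

	/* 5320 MHz lies between the first two piers; prints -14 */
	printf("quick_drop(5320) = %d\n", interp(5320, f, t, 3));
	return 0;
}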
static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
u32 value;
value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
eep->modalHeader5G.txEndToXpaOff;
REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
}
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@ -3972,10 +3978,12 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
ar9003_hw_drive_strength_apply(ah);
ar9003_hw_atten_apply(ah, chan);
ar9003_hw_quick_drop_apply(ah, chan->channel);
if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
ar9003_hw_internal_regulator_apply(ah);
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
ar9003_hw_apply_tuning_caps(ah);
ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
}
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@ -5051,6 +5059,8 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
regulatory->max_power_level = targetPowerValT2[i];
}
ath9k_hw_update_regulatory_maxpower(ah);
if (test)
return;

View File

@ -216,10 +216,8 @@ struct ar9300_modal_eep_header {
u8 spurChans[AR_EEPROM_MODAL_SPURS];
/* 3 Check if the register is per chain */
int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
u8 ob[AR9300_MAX_CHAINS];
u8 db_stage2[AR9300_MAX_CHAINS];
u8 db_stage3[AR9300_MAX_CHAINS];
u8 db_stage4[AR9300_MAX_CHAINS];
u8 reserved[11];
int8_t quick_drop;
u8 xpaBiasLvl;
u8 txFrameToDataStart;
u8 txFrameToPaOn;
@ -269,7 +267,9 @@ struct cal_ctl_data_5g {
struct ar9300_BaseExtension_1 {
u8 ant_div_control;
u8 future[13];
u8 future[11];
int8_t quick_drop_low;
int8_t quick_drop_high;
} __packed;
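A side note, not taken from the diff: both repacks above keep the EEPROM layout intact. The removed ob[3] + db_stage2[3] + db_stage3[3] + db_stage4[3] fields occupied 12 bytes, exactly what reserved[11] + quick_drop now takes, and future[13] shrinks to future[11] to make room for quick_drop_low and quick_drop_high. A hedged compile-time check using stand-in packed structs (not the real driver headers):

#include <assert.h>
#include <stdint.h>

#define AR9300_MAX_CHAINS 3

/* stand-in for the fields removed from ar9300_modal_eep_header */
struct old_span {
	uint8_t ob[AR9300_MAX_CHAINS];
	uint8_t db_stage2[AR9300_MAX_CHAINS];
	uint8_t db_stage3[AR9300_MAX_CHAINS];
	uint8_t db_stage4[AR9300_MAX_CHAINS];
} __attribute__((packed));

/* stand-in for the fields that replace them */
struct new_span {
	uint8_t reserved[11];
	int8_t quick_drop;
} __attribute__((packed));

/* both spans must stay the same size so later fields keep their offsets */
static_assert(sizeof(struct old_span) == sizeof(struct new_span),
	      "modal header layout would change");

int main(void) { return 0; }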
struct ar9300_BaseExtension_2 {

View File

@ -389,6 +389,8 @@
#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
#define AR_PHY_AGC_QUICK_DROP 0x03c00000
#define AR_PHY_AGC_QUICK_DROP_S 22
#define AR_PHY_AGC_COARSE_LOW 0x00007F80
#define AR_PHY_AGC_COARSE_LOW_S 7
#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
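As background rather than part of the change: ar9003_hw_quick_drop_apply() writes its value into bits 25:22 of AR_PHY_AGC through REG_RMW_FIELD() using this MASK/_S pair. A small standalone illustration of the mask-and-shift read-modify-write convention these defines follow (a generic sketch, not the driver's actual macro):

#include <stdint.h>
#include <stdio.h>

#define AR_PHY_AGC_QUICK_DROP	0x03c00000
#define AR_PHY_AGC_QUICK_DROP_S	22

/* insert a field value into a register word: clear the masked bits,
 * then OR in the new value shifted into place
 */
static uint32_t rmw_field(uint32_t reg, uint32_t mask, int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t agc = 0x12345678;

	agc = rmw_field(agc, AR_PHY_AGC_QUICK_DROP,
			AR_PHY_AGC_QUICK_DROP_S, 0x9);
	printf("0x%08x\n", agc);	/* bits 25:22 now hold 0x9 */
	return 0;
}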

View File

@ -35,6 +35,20 @@ struct ath_btcoex_config {
bool bt_hold_rx_clear;
};
static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
[AR9300_NUM_WLAN_WEIGHTS] = {
{ 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
{ 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
{ 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
};
static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
[AR9300_NUM_WLAN_WEIGHTS] = {
{ 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
{ 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
{ 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
{ 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
};
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
{
@ -151,27 +165,26 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
u32 val;
int i;
/*
* Program coex mode and weight registers to
* enable coex 3-wire
*/
REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
btcoex->bt_weight[i]);
} else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
@ -184,10 +197,23 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
}
static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
int i;
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
btcoex->wlan_weight[i]);
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
btcoex->enabled = true;
}
void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
@ -201,6 +227,9 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
case ATH_BTCOEX_CFG_3WIRE:
ath9k_hw_btcoex_enable_3wire(ah);
break;
case ATH_BTCOEX_CFG_MCI:
ath9k_hw_btcoex_enable_mci(ah);
return;
}
REG_RMW(ah, AR_GPIO_PDPU,
@ -214,7 +243,15 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
void ath9k_hw_btcoex_disable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
int i;
btcoex_hw->enabled = false;
if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
btcoex_hw->wlan_weight[i]);
}
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
@ -227,49 +264,27 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
} else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
}
ah->btcoex_hw.enabled = false;
}
EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT;
ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT;
ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT;
ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT;
struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
const u32 *weight = AR_SREV_9462(ah) ? ar9462_wlan_weights[stomp_type] :
ar9003_wlan_weights[stomp_type];
int i;
switch (stomp_type) {
case ATH_BTCOEX_STOMP_ALL:
ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
break;
case ATH_BTCOEX_STOMP_LOW:
ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
break;
case ATH_BTCOEX_STOMP_NONE:
ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
break;
default:
ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
"Invalid Stomptype\n");
break;
for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
btcoex->bt_weight[i] = AR9300_BT_WGHT;
btcoex->wlan_weight[i] = weight[i];
}
ath9k_hw_btcoex_enable(ah);
}
/*
@ -301,7 +316,5 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
"Invalid Stomptype\n");
break;
}
ath9k_hw_btcoex_enable(ah);
}
EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);

View File

@ -36,18 +36,22 @@
#define ATH_BT_CNT_THRESHOLD 3
#define ATH_BT_CNT_SCAN_THRESHOLD 15
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
ATH_BTCOEX_NO_STOMP,
ATH_BTCOEX_STOMP_ALL,
ATH_BTCOEX_STOMP_LOW,
ATH_BTCOEX_STOMP_NONE
ATH_BTCOEX_STOMP_NONE,
ATH_BTCOEX_STOMP_LOW_FTP,
ATH_BTCOEX_STOMP_MAX
};
enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
ATH_BTCOEX_CFG_2WIRE,
ATH_BTCOEX_CFG_3WIRE,
ATH_BTCOEX_CFG_MCI,
};
struct ath_btcoex_hw {
@ -59,6 +63,8 @@ struct ath_btcoex_hw {
u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
};
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);

View File

@ -249,7 +249,8 @@ enum eeprom_param {
EEP_ANT_DIV_CTL1,
EEP_CHAIN_MASK_REDUCE,
EEP_ANTENNA_GAIN_2G,
EEP_ANTENNA_GAIN_5G
EEP_ANTENNA_GAIN_5G,
EEP_QUICK_DROP
};
enum ar5416_rates {

View File

@ -198,6 +198,7 @@ static void ath_btcoex_period_timer(unsigned long data)
ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
@ -240,6 +241,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
ath9k_ps_restore(sc);
}

View File

@ -80,6 +80,7 @@ static void ath_btcoex_period_work(struct work_struct *work)
ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
ath9k_hw_btcoex_enable(priv->ah);
timer_period = is_btscan ? btcoex->btscan_no_stomp :
btcoex->btcoex_no_stomp;
ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
@ -108,6 +109,7 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
ath9k_hw_btcoex_enable(priv->ah);
}
void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)

View File

@ -59,9 +59,6 @@
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
#define ATH_DEFAULT_NOISE_FLOOR -95
@ -802,8 +799,6 @@ struct ath_hw {
/* Bluetooth coexistence */
struct ath_btcoex_hw btcoex_hw;
u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
u32 intr_txqs;
u8 txchainmask;

View File

@ -1752,19 +1752,10 @@ enum {
#define AR_BT_COEX_WL_WEIGHTS0 0x8174
#define AR_BT_COEX_WL_WEIGHTS1 0x81c4
#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2))
#define AR_BT_COEX_BT_WEIGHTS(_i) (0x83ac + (_i << 2))
#define AR_BT_COEX_BT_WEIGHTS0 0x83ac
#define AR_BT_COEX_BT_WEIGHTS1 0x83b0
#define AR_BT_COEX_BT_WEIGHTS2 0x83b4
#define AR_BT_COEX_BT_WEIGHTS3 0x83b8
#define AR9300_BT_WGHT 0xcccc4444
#define AR9300_STOMP_ALL_WLAN_WGHT0 0xfffffff0
#define AR9300_STOMP_ALL_WLAN_WGHT1 0xfffffff0
#define AR9300_STOMP_LOW_WLAN_WGHT0 0x88888880
#define AR9300_STOMP_LOW_WLAN_WGHT1 0x88888880
#define AR9300_STOMP_NONE_WLAN_WGHT0 0x00000000
#define AR9300_STOMP_NONE_WLAN_WGHT1 0x00000000
#define AR9300_BT_WGHT 0xcccc4444
#define AR_BT_COEX_MODE2 0x817c
#define AR_BT_BCN_MISS_THRESH 0x000000ff

View File

@ -687,14 +687,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
return -EBADE;
}
if (pendok && ((bus->ci->c_inf[1].id == PCMCIA_CORE_ID)
&& (bus->ci->c_inf[1].rev == 9))) {
u32 dummy, retries;
r_sdreg32(bus, &dummy,
offsetof(struct sdpcmd_regs, clockctlstatus),
&retries);
}
/* Check current status */
clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
@ -911,13 +903,6 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
/* Force pad isolation off if possible
(in case power never toggled) */
if ((bus->ci->c_inf[1].id == PCMCIA_CORE_ID)
&& (bus->ci->c_inf[1].rev >= 10))
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_DEVICE_CTL, 0, NULL);
/* Make sure the controller has the bus up */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@ -1107,6 +1092,28 @@ static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_bus *bus, uint len)
return ret;
}
/* return total length of buffer chain */
static uint brcmf_sdbrcm_glom_len(struct brcmf_bus *bus)
{
struct sk_buff *p;
uint total;
total = 0;
skb_queue_walk(&bus->glom, p)
total += p->len;
return total;
}
static void brcmf_sdbrcm_free_glom(struct brcmf_bus *bus)
{
struct sk_buff *cur, *next;
skb_queue_walk_safe(&bus->glom, cur, next) {
skb_unlink(cur, &bus->glom);
brcmu_pkt_buf_free_skb(cur);
}
}
static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
{
u16 dlen, totlen;
@ -1191,11 +1198,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
pfirst = pnext = NULL;
} else {
if (!skb_queue_empty(&bus->glom))
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
}
brcmf_sdbrcm_free_glom(bus);
num = 0;
}
@ -1218,7 +1221,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
pfirst = skb_peek(&bus->glom);
dlen = (u16) brcmu_pkttotlen(pfirst);
dlen = (u16) brcmf_sdbrcm_glom_len(bus);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
@ -1262,10 +1265,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
bus->rxglomfail++;
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
}
brcmf_sdbrcm_free_glom(bus);
}
return 0;
}
@ -1387,10 +1387,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
bus->rxglomfail++;
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
}
brcmf_sdbrcm_free_glom(bus);
}
bus->nextlen = 0;
return 0;
@ -3098,7 +3095,6 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
{
uint retries;
int bcmerror = 0;
u8 idx;
struct chip_info *ci = bus->ci;
/* To enter download state, disable ARM and reset SOCRAM.
@ -3107,11 +3103,9 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
if (enter) {
bus->alp_only = true;
idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
brcmf_sdio_chip_coredisable(bus->sdiodev, ci->c_inf[idx].base);
ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_INTERNAL_MEM);
brcmf_sdio_chip_resetcore(bus->sdiodev, ci->c_inf[idx].base);
ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM);
/* Clear the top bit of memory */
if (bus->ramsize) {
@ -3120,9 +3114,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
(u8 *)&zeros, 4);
}
} else {
idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_INTERNAL_MEM);
if (!brcmf_sdio_chip_iscoreup(bus->sdiodev,
ci->c_inf[idx].base)) {
if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
bcmerror = -EBADE;
goto fail;
@ -3137,8 +3129,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
w_sdreg32(bus, 0xFFFFFFFF,
offsetof(struct sdpcmd_regs, intstatus), &retries);
idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
brcmf_sdio_chip_resetcore(bus->sdiodev, ci->c_inf[idx].base);
ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
/* Allow HT Clock now that the ARM is running. */
bus->alp_only = false;
@ -3363,8 +3354,6 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
u8 saveclk;
uint retries;
int err;
struct sk_buff *cur;
struct sk_buff *next;
brcmf_dbg(TRACE, "Enter\n");
@ -3424,11 +3413,7 @@ void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
/* Clear any held glomming stuff */
if (bus->glomd)
brcmu_pkt_buf_free_skb(bus->glomd);
if (!skb_queue_empty(&bus->glom))
skb_queue_walk_safe(&bus->glom, cur, next) {
skb_unlink(cur, &bus->glom);
brcmu_pkt_buf_free_skb(cur);
}
brcmf_sdbrcm_free_glom(bus);
/* Clear rx control and wake any waiters */
bus->rxlen = 0;

View File

@ -45,6 +45,14 @@
((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
((sbidh) & SSB_IDHIGH_RCLO))
/* SOC Interconnect types (aka chip types) */
#define SOCI_SB 0
#define SOCI_AI 1
/* EROM CompIdentB */
#define CIB_REV_MASK 0xff000000
#define CIB_REV_SHIFT 24
#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
/* SDIO Pad drive strength to select value mappings */
struct sdiod_drive_str {
@ -96,148 +104,263 @@ brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
}
static u32
brcmf_sdio_chip_corerev(struct brcmf_sdio_dev *sdiodev,
u32 corebase)
brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbidhigh), 4);
CORE_SB(ci->c_inf[idx].base, sbidhigh), 4);
return SBCOREREV(regdata);
}
bool
brcmf_sdio_chip_iscoreup(struct brcmf_sdio_dev *sdiodev,
u32 corebase)
static u32
brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
}
static bool
brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatelow), 4);
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
return (SSB_TMSLOW_CLOCK == regdata);
}
void
brcmf_sdio_chip_coredisable(struct brcmf_sdio_dev *sdiodev, u32 corebase)
static bool
brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
u32 regdata;
u8 idx;
bool ret;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatelow), 4);
ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
regdata = brcmf_sdcard_reg_read(sdiodev,
ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
4);
ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
return ret;
}
static void
brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
if (regdata & SSB_TMSLOW_RESET)
return;
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatelow), 4);
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
/*
* set target reject and spin until busy is clear
* (preserve core-specific bits)
*/
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatelow), 4);
brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
4, regdata | SSB_TMSLOW_REJECT);
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
4, regdata | SSB_TMSLOW_REJECT);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatelow), 4);
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
udelay(1);
SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatehigh), 4) &
CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) &
SSB_TMSHIGH_BUSY), 100000);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatehigh), 4);
CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
if (regdata & SSB_TMSHIGH_BUSY)
brcmf_dbg(ERROR, "core state still busy\n");
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbidlow), 4);
CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
if (regdata & SSB_IDLOW_INITIATOR) {
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbimstate), 4) |
CORE_SB(ci->c_inf[idx].base, sbimstate), 4) |
SSB_IMSTATE_REJECT;
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(corebase, sbimstate), 4,
CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
regdata);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbimstate), 4);
CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
udelay(1);
SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbimstate), 4) &
CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
SSB_IMSTATE_BUSY), 100000);
}
/* set reset and reject while enabling the clocks */
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(corebase, sbtmstatelow), 4,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
(SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatelow), 4);
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
udelay(10);
/* clear the initiator reject bit */
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbidlow), 4);
CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
if (regdata & SSB_IDLOW_INITIATOR) {
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbimstate), 4) &
CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
~SSB_IMSTATE_REJECT;
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(corebase, sbimstate), 4,
CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
regdata);
}
}
/* leave reset and reject asserted */
brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
(SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
udelay(1);
}
void
brcmf_sdio_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
static void
brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
u8 idx;
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
/* if core is already in reset, just return */
regdata = brcmf_sdcard_reg_read(sdiodev,
ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
4);
if ((regdata & BCMA_RESET_CTL_RESET) != 0)
return;
brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
4, 0);
regdata = brcmf_sdcard_reg_read(sdiodev,
ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
udelay(10);
brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
4, BCMA_RESET_CTL_RESET);
udelay(1);
}
static void
brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
u32 regdata;
u8 idx;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
/*
* Must do the disable sequence first to work for
* arbitrary current core state.
*/
brcmf_sdio_chip_coredisable(sdiodev, corebase);
brcmf_sdio_sb_coredisable(sdiodev, ci, coreid);
/*
* Now do the initialization sequence.
* set reset while enabling the clock and
* forcing them on throughout the core
*/
brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
udelay(1);
/* clear any serror */
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbtmstatehigh), 4);
CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
if (regdata & SSB_TMSHIGH_SERR)
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(corebase, sbtmstatehigh), 4, 0);
CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(corebase, sbimstate), 4);
CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4,
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO));
/* clear reset and allow it to propagate throughout the core */
brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
udelay(1);
/* leave clock enabled */
brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
brcmf_sdcard_reg_write(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
4, SSB_TMSLOW_CLOCK);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
udelay(1);
}
static void
brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid)
{
u8 idx;
u32 regdata;
idx = brcmf_sdio_chip_getinfidx(ci, coreid);
/* must disable first to work for arbitrary current core state */
brcmf_sdio_ai_coredisable(sdiodev, ci, coreid);
/* now do initialization sequence */
brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
regdata = brcmf_sdcard_reg_read(sdiodev,
ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
4, 0);
udelay(1);
brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
4, BCMA_IOCTL_CLK);
regdata = brcmf_sdcard_reg_read(sdiodev,
ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
udelay(1);
}
@ -258,6 +381,7 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
CORE_CC_REG(ci->c_inf[0].base, chipid), 4);
ci->chip = regdata & CID_ID_MASK;
ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
@ -277,6 +401,24 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
return -ENODEV;
}
switch (ci->socitype) {
case SOCI_SB:
ci->iscoreup = brcmf_sdio_sb_iscoreup;
ci->corerev = brcmf_sdio_sb_corerev;
ci->coredisable = brcmf_sdio_sb_coredisable;
ci->resetcore = brcmf_sdio_sb_resetcore;
break;
case SOCI_AI:
ci->iscoreup = brcmf_sdio_ai_iscoreup;
ci->corerev = brcmf_sdio_ai_corerev;
ci->coredisable = brcmf_sdio_ai_coredisable;
ci->resetcore = brcmf_sdio_ai_resetcore;
break;
default:
brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype);
return -ENODEV;
}
return 0;
}
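The switch above is the core of this refactor: the interconnect type read from the chip ID register (Sonics SB vs. AXI/AI) selects a set of core-access callbacks once, and every later caller goes through ci->iscoreup(), ci->coredisable() and friends instead of passing raw core base addresses around. A minimal, driver-independent sketch of that dispatch pattern, with purely illustrative names:

#include <stdio.h>

struct chip;

/* per-interconnect core accessors, picked once at chip recognition */
struct chip_ops {
	int (*iscoreup)(struct chip *c, int coreid);
};

struct chip {
	int socitype;		/* 0 = SB, 1 = AI */
	struct chip_ops ops;
};

static int sb_iscoreup(struct chip *c, int coreid) { return 1; }
static int ai_iscoreup(struct chip *c, int coreid) { return 0; }

static int chip_recognize(struct chip *c)
{
	switch (c->socitype) {
	case 0:
		c->ops.iscoreup = sb_iscoreup;
		break;
	case 1:
		c->ops.iscoreup = ai_iscoreup;
		break;
	default:
		return -1;	/* unsupported interconnect type */
	}
	return 0;
}

int main(void)
{
	struct chip c = { .socitype = 1 };

	if (chip_recognize(&c) == 0)
		printf("core up: %d\n", c.ops.iscoreup(&c, 0));
	return 0;
}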
@ -332,12 +474,8 @@ static void
brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci)
{
u32 regdata;
u8 idx;
/* get chipcommon rev */
ci->c_inf[0].rev =
brcmf_sdio_chip_corerev(sdiodev, ci->c_inf[0].base);
ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
/* get chipcommon capabilities */
ci->c_inf[0].caps =
@ -351,10 +489,7 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
}
ci->c_inf[1].rev = brcmf_sdio_chip_corerev(sdiodev, ci->c_inf[1].base);
regdata = brcmf_sdcard_reg_read(sdiodev,
CORE_SB(ci->c_inf[1].base, sbidhigh), 4);
ci->c_inf[1].id = (regdata & SSB_IDHIGH_CC) >> SSB_IDHIGH_CC_SHIFT;
ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
ci->c_inf[0].rev, ci->pmurev,
@ -364,8 +499,7 @@ brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
* Make sure any on-chip ARM is off (in case strapping is wrong),
* or downloaded code was already running.
*/
idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
brcmf_sdio_chip_coredisable(sdiodev, ci->c_inf[idx].base);
ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3);
}
int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,

View File

@ -60,17 +60,28 @@ struct chip_core_info {
u32 base;
u32 wrapbase;
u32 caps;
u32 cib;
};
struct chip_info {
u32 chip;
u32 chiprev;
u32 socitype;
/* core info */
/* always put chipcommon core at 0, bus core at 1 */
struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
u32 pmurev;
u32 pmucaps;
u32 ramsize;
bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
u16 coreid);
u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
u16 coreid);
void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid);
void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
struct chip_info *ci, u16 coreid);
};
struct sbconfig {
@ -113,12 +124,6 @@ struct sbconfig {
u32 sbidhigh; /* identification */
};
extern void brcmf_sdio_chip_resetcore(struct brcmf_sdio_dev *sdiodev,
u32 corebase);
extern bool brcmf_sdio_chip_iscoreup(struct brcmf_sdio_dev *sdiodev,
u32 corebase);
extern void brcmf_sdio_chip_coredisable(struct brcmf_sdio_dev *sdiodev,
u32 corebase);
extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
struct chip_info **ci_ptr, u32 regs);
extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);

View File

@ -2049,10 +2049,10 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
notify_timestamp, notify_capability, notify_interval, notify_ie,
notify_ielen, notify_signal, GFP_KERNEL);
if (!bss) {
WL_ERR("cfg80211_inform_bss_frame error\n");
return -EINVAL;
}
if (!bss)
return -ENOMEM;
cfg80211_put_bss(bss);
return err;
}
@ -2096,6 +2096,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
struct ieee80211_channel *notify_channel;
struct brcmf_bss_info_le *bi = NULL;
struct ieee80211_supported_band *band;
struct cfg80211_bss *bss;
u8 *buf = NULL;
s32 err = 0;
u16 channel;
@ -2149,10 +2150,17 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
WL_CONN("signal: %d\n", notify_signal);
WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
cfg80211_inform_bss(wiphy, notify_channel, bssid,
bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
notify_timestamp, notify_capability, notify_interval,
notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
if (!bss) {
err = -ENOMEM;
goto CleanUp;
}
cfg80211_put_bss(bss);
CleanUp:
kfree(buf);

View File

@ -16,6 +16,8 @@
* File contents: support functions for PCI/PCIe
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/pci.h>
@ -349,9 +351,9 @@
#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
#ifdef BCMDBG
#define SI_MSG(args) printk args
#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
#define SI_MSG(args)
#define SI_MSG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */
#define GOODCOREADDR(x, b) \
@ -1073,7 +1075,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
/* scan for cores */
if (socitype == SOCI_AI) {
SI_MSG(("Found chip type AI (0x%08x)\n", w));
SI_MSG("Found chip type AI (0x%08x)\n", w);
/* pass chipc address instead of original core base */
ai_scan(&sii->pub, cc);
} else {
@ -1129,7 +1131,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
* set chipControl register bit 15
*/
if (sih->chiprev == 0) {
SI_MSG(("Applying 43224A0 WARs\n"));
SI_MSG("Applying 43224A0 WARs\n");
ai_corereg(sih, SI_CC_IDX,
offsetof(struct chipcregs, chipcontrol),
CCTRL43224_GPIO_TOGGLE,
@ -1138,7 +1140,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
CCTRL_43224A0_12MA_LED_DRIVE);
}
if (sih->chiprev >= 1) {
SI_MSG(("Applying 43224B0+ WARs\n"));
SI_MSG("Applying 43224B0+ WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
CCTRL_43224B0_12MA_LED_DRIVE);
}
@ -1149,7 +1151,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
* enable 12 mA drive strength for 4313 and
* set chipControl register bit 1
*/
SI_MSG(("Applying 4313 WARs\n"));
SI_MSG("Applying 4313 WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
CCTRL_4313_12MA_LED_DRIVE);
}

View File

@ -649,7 +649,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
len = roundup(len, 4);
ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
dma_len += (u16) brcmu_pkttotlen(p);
dma_len += (u16) p->len;
BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
" seg_cnt %d null delim %d\n",
@ -741,9 +741,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
if (p) {
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
((u8) (p->priority) == tid)) {
plen = brcmu_pkttotlen(p) +
AMPDU_MAX_MPDU_OVERHEAD;
plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
plen = max(scb_ampdu->min_len, plen);
if ((plen + ampdu_len) > max_ampdu_bytes) {

View File

@ -1153,121 +1153,6 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
&txpwr);
}
#ifdef POWER_DBG
static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
{
int i;
char buf[80];
char fraction[4][4] = { " ", ".25", ".5 ", ".75" };
sprintf(buf, "CCK ");
for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz OFDM SISO ");
for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz OFDM CDD ");
for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz OFDM SISO ");
for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->ofdm_40_siso[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz OFDM CDD ");
for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->ofdm_40_cdd[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz MCS0-7 SISO ");
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs_20_siso[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz MCS0-7 CDD ");
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs_20_cdd[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz MCS0-7 STBC ");
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs_20_stbc[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "20 MHz MCS8-15 SDM ");
for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs_20_mimo[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz MCS0-7 SISO ");
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs_40_siso[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz MCS0-7 CDD ");
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs_40_cdd[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz MCS0-7 STBC ");
for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs_40_stbc[i] %
BRCMS_TXPWR_DB_FACTOR]);
printk(KERN_DEBUG "%s\n", buf);
sprintf(buf, "40 MHz MCS8-15 SDM ");
for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
sprintf(buf[strlen(buf)], " %2d%s",
txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs_40_mimo[i] %
BRCMS_TXPWR_DB_FACTOR]);
}
printk(KERN_DEBUG "%s\n", buf);
printk(KERN_DEBUG "MCS32 %2d%s\n",
txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
}
#endif /* POWER_DBG */
void
brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
struct txpwr_limits *txpwr)
@ -1478,9 +1363,6 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
}
#ifdef POWER_DBG
wlc_phy_txpower_limits_dump(txpwr);
#endif
return;
}

View File

@ -13,6 +13,9 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pci.h>
@ -168,26 +171,25 @@
/* debug/trace */
#ifdef BCMDBG
#define DMA_ERROR(args) \
do { \
if (!(*di->msg_level & 1)) \
; \
else \
printk args; \
} while (0)
#define DMA_TRACE(args) \
do { \
if (!(*di->msg_level & 2)) \
; \
else \
printk args; \
} while (0)
#define DMA_ERROR(fmt, ...) \
do { \
if (*di->msg_level & 1) \
pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define DMA_TRACE(fmt, ...) \
do { \
if (*di->msg_level & 2) \
pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#else
#define DMA_ERROR(args)
#define DMA_TRACE(args)
#define DMA_ERROR(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
#define DMA_TRACE(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */
#define DMA_NONE(args)
#define DMA_NONE(fmt, ...) \
no_printk(fmt, ##__VA_ARGS__)
#define MAXNAMEL 8 /* 8 char names */
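A design note that is mine, not the patch author's: routing the disabled variants through no_printk() keeps the compiler's printf-format checking of the arguments even though no code is emitted, which the old empty double-parenthesis macros could not offer. A small userspace analogue of the idea:

#include <stdio.h>

#define DBG_ENABLED 0

#if DBG_ENABLED
#define DBG(fmt, ...) fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__)
#else
/* if (0) keeps format/argument type checking but compiles to nothing */
#define DBG(fmt, ...) \
	do { if (0) fprintf(stderr, fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
	DBG("rxin %d rxout %d\n", 1, 2);	/* checked, never printed */
	return 0;
}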
@ -361,7 +363,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
uint dmactrlflags;
if (di == NULL) {
DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
DMA_ERROR("NULL dma handle\n");
return 0;
}
@ -412,13 +414,13 @@ static bool _dma_isaddrext(struct dma_info *di)
/* not all tx or rx channel are available */
if (di->d64txregs != NULL) {
if (!_dma64_addrext(di->d64txregs))
DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
"AE set\n", di->name));
DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
di->name);
return true;
} else if (di->d64rxregs != NULL) {
if (!_dma64_addrext(di->d64rxregs))
DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
"AE set\n", di->name));
DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
di->name);
return true;
}
@ -519,8 +521,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig);
if (va == NULL) {
DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
" failed\n", di->name));
DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
di->name);
return false;
}
align = (1 << align_bits);
@ -533,8 +535,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig);
if (va == NULL) {
DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
" failed\n", di->name));
DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
di->name);
return false;
}
align = (1 << align_bits);
@ -583,11 +585,10 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
"rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
"dmaregstx %p dmaregsrx %p\n", name, "DMA64",
di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n",
name, "DMA64",
di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx);
/* make a private copy of our callers name */
strncpy(di->name, name, MAXNAMEL);
@ -645,8 +646,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dmadesc_align = 4; /* 16 byte alignment */
}
DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
di->aligndesc_4k, di->dmadesc_align));
DMA_NONE("DMA descriptor align_needed %d, align %d\n",
di->aligndesc_4k, di->dmadesc_align);
/* allocate tx packet pointer vector */
if (ntxd) {
@ -684,21 +685,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if ((di->ddoffsetlow != 0) && !di->addrext) {
if (di->txdpa > SI_PCI_DMA_SZ) {
DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
"supported\n", di->name, (u32)di->txdpa));
DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
di->name, (u32)di->txdpa);
goto fail;
}
if (di->rxdpa > SI_PCI_DMA_SZ) {
DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
"supported\n", di->name, (u32)di->rxdpa));
DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
di->name, (u32)di->rxdpa);
goto fail;
}
}
DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
"dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
di->addrext));
DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
di->ddoffsetlow, di->ddoffsethigh,
di->dataoffsetlow, di->dataoffsethigh,
di->addrext);
return (struct dma_pub *) di;
@ -744,7 +745,7 @@ void dma_detach(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
DMA_TRACE(("%s: dma_detach\n", di->name));
DMA_TRACE("%s:\n", di->name);
/* free dma descriptor rings */
if (di->txd64)
@ -812,7 +813,7 @@ static void _dma_rxenable(struct dma_info *di)
uint dmactrlflags = di->dma.dmactrlflags;
u32 control;
DMA_TRACE(("%s: dma_rxenable\n", di->name));
DMA_TRACE("%s:\n", di->name);
control =
(R_REG(&di->d64rxregs->control) & D64_RC_AE) |
@ -832,7 +833,7 @@ void dma_rxinit(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
DMA_TRACE(("%s: dma_rxinit\n", di->name));
DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0)
return;
@ -926,7 +927,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
return 0;
len = le16_to_cpu(*(__le16 *) (p->data));
DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
dma_spin_for_len(len, p);
/* set actual length */
@ -953,14 +954,14 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
D64_RS0_CD_MASK) -
di->rcvptrbase) & D64_RS0_CD_MASK,
struct dma64desc);
DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
di->rxin, di->rxout, cur));
DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
di->rxin, di->rxout, cur);
}
#endif /* BCMDBG */
if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
di->name, len));
DMA_ERROR("%s: bad frame length (%d)\n",
di->name, len);
skb_queue_walk_safe(&dma_frames, p, next) {
skb_unlink(p, &dma_frames);
brcmu_pkt_buf_free_skb(p);
@ -977,7 +978,7 @@ int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
static bool dma64_rxidle(struct dma_info *di)
{
DMA_TRACE(("%s: dma_rxidle\n", di->name));
DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0)
return true;
@ -1017,7 +1018,7 @@ bool dma_rxfill(struct dma_pub *pub)
n = di->nrxpost - nrxdactive(di, rxin, rxout);
DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
DMA_TRACE("%s: post %d\n", di->name, n);
if (di->rxbufsize > BCMEXTRAHDROOM)
extra_offset = di->rxextrahdrroom;
@ -1030,11 +1031,9 @@ bool dma_rxfill(struct dma_pub *pub)
p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) {
DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
di->name));
DMA_ERROR("%s: out of rxbufs\n", di->name);
if (i == 0 && dma64_rxidle(di)) {
DMA_ERROR(("%s: rxfill64: ring is empty !\n",
di->name));
DMA_ERROR("%s: ring is empty !\n", di->name);
ring_empty = true;
}
di->dma.rxnobuf++;
@ -1079,7 +1078,7 @@ void dma_rxreclaim(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
DMA_TRACE("%s:\n", di->name);
while ((p = _dma_getnextrxp(di, true)))
brcmu_pkt_buf_free_skb(p);
@ -1110,7 +1109,7 @@ void dma_txinit(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
u32 control = D64_XC_XE;
DMA_TRACE(("%s: dma_txinit\n", di->name));
DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
@ -1142,7 +1141,7 @@ void dma_txsuspend(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
DMA_TRACE(("%s: dma_txsuspend\n", di->name));
DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
@ -1154,7 +1153,7 @@ void dma_txresume(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
DMA_TRACE(("%s: dma_txresume\n", di->name));
DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
@ -1176,11 +1175,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
(range == DMA_RANGE_ALL) ? "all" :
((range ==
DMA_RANGE_TRANSMITTED) ? "transmitted" :
"transferred")));
DMA_TRACE("%s: %s\n",
di->name,
range == DMA_RANGE_ALL ? "all" :
range == DMA_RANGE_TRANSMITTED ? "transmitted" :
"transferred");
if (di->txin == di->txout)
return;
@ -1250,7 +1249,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
u32 flags = 0;
dma_addr_t pa;
DMA_TRACE(("%s: dma_txfast\n", di->name));
DMA_TRACE("%s:\n", di->name);
txout = di->txout;
@ -1314,7 +1313,7 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
return 0;
outoftxd:
DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
DMA_ERROR("%s: out of txds !!!\n", di->name);
brcmu_pkt_buf_free_skb(p0);
di->dma.txavail = 0;
di->dma.txnobuf++;
@ -1338,11 +1337,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
u16 active_desc;
struct sk_buff *txp;
DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
(range == DMA_RANGE_ALL) ? "all" :
((range ==
DMA_RANGE_TRANSMITTED) ? "transmitted" :
"transferred")));
DMA_TRACE("%s: %s\n",
di->name,
range == DMA_RANGE_ALL ? "all" :
range == DMA_RANGE_TRANSMITTED ? "transmitted" :
"transferred");
if (di->ntxd == 0)
return NULL;
@ -1402,8 +1401,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
return txp;
bogus:
DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
"force %d\n", start, end, di->txout, forceall));
DMA_NONE("bogus curr: start %d end %d txout %d\n",
start, end, di->txout);
return NULL;
}

View File

@ -619,13 +619,6 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wl->pub->global_ampdu->scb = scb;
wl->pub->global_ampdu->max_pdu = 16;
sta->ht_cap.ht_supported = true;
sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
/*
* minstrel_ht initiates addBA on our behalf by calling
* ieee80211_start_tx_ba_session()

View File

@ -109,11 +109,6 @@
#define BPHY_PLCP_TIME 192
#define RIFS_11N_TIME 2
#define AC_BE 0
#define AC_BK 1
#define AC_VI 2
#define AC_VO 3
/* length of the BCN template area */
#define BCN_TMPL_LEN 512
@ -305,10 +300,22 @@ uint brcm_msg_level =
#endif /* BCMDBG */
/* TX FIFO number to WME/802.1E Access Category */
static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
static const u8 wme_fifo2ac[] = {
IEEE80211_AC_BK,
IEEE80211_AC_BE,
IEEE80211_AC_VI,
IEEE80211_AC_VO,
IEEE80211_AC_BE,
IEEE80211_AC_BE
};
/* WME/802.1E Access Category to TX FIFO number */
static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
/* ieee80211 Access Category to TX FIFO number */
static const u8 wme_ac2fifo[] = {
TX_AC_VO_FIFO,
TX_AC_VI_FIFO,
TX_AC_BE_FIFO,
TX_AC_BK_FIFO
};
/* 802.1D Priority to precedence queue mapping */
const u8 wlc_prio2prec_map[] = {
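/*
 * Note: the rewritten tables above re-key the AC maps to mac80211's access
 * category numbering (IEEE80211_AC_VO = 0, _VI = 1, _BE = 2, _BK = 3, with
 * IEEE80211_NUM_ACS = 4) in place of the removed driver-private
 * AC_BE/AC_BK/AC_VI/AC_VO order.  The FIFO assignments themselves do not
 * change; assuming the usual brcmsmac FIFO numbering (TX_AC_BK_FIFO = 0
 * through TX_AC_VO_FIFO = 3), the old and new lookups are equivalent, e.g.:
 *
 *	old: wme_ac2fifo[AC_BE]           == 1   (driver order: BE, BK, VI, VO)
 *	new: wme_ac2fifo[IEEE80211_AC_BE] == TX_AC_BE_FIFO == 1
 */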
@ -893,7 +900,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
lfbl, /* Long Frame Rate Fallback Limit */
fbl;
if (queue < AC_COUNT) {
if (queue < IEEE80211_NUM_ACS) {
sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
EDCF_SFB);
lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
@ -942,7 +949,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
totlen = brcmu_pkttotlen(p);
totlen = p->len;
free_pdu = true;
brcms_c_txfifo_complete(wlc, queue, 1);
@ -3576,42 +3583,30 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
brcms_c_set_phy_chanspec(wlc, chanspec);
}
static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
/*
* Set or clear maccontrol bits MCTL_PROMISC, MCTL_BCNS_PROMISC and
* MCTL_KEEPCONTROL
*/
static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
{
u32 promisc_bits = 0;
if (wlc->bcnmisc_monitor)
brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
else
brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0);
promisc_bits |= MCTL_BCNS_PROMISC;
if (wlc->monitor)
promisc_bits |=
MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL;
brcms_b_mctrl(wlc->hw,
MCTL_PROMISC | MCTL_BCNS_PROMISC | MCTL_KEEPCONTROL,
promisc_bits);
}
void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
{
wlc->bcnmisc_monitor = promisc;
brcms_c_mac_bcn_promisc(wlc);
}
/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
{
u32 promisc_bits = 0;
/*
* promiscuous mode just sets MCTL_PROMISC
* Note: APs get all BSS traffic without the need to set
* the MCTL_PROMISC bit since all BSS data traffic is
* directed at the AP
*/
if (wlc->pub->promisc)
promisc_bits |= MCTL_PROMISC;
/* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
* Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
* handled in brcms_c_mac_bcn_promisc()
*/
if (wlc->monitor)
promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
brcms_c_mac_promisc(wlc);
}
/*
@ -3643,7 +3638,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
}
/* update the various promisc bits */
brcms_c_mac_bcn_promisc(wlc);
brcms_c_mac_promisc(wlc);
}
@ -4125,7 +4119,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
EDCF_TXOP2USEC(acp_shm.txop);
acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
if (aci == AC_VI && acp_shm.txop == 0
if (aci == IEEE80211_AC_VI && acp_shm.txop == 0
&& acp_shm.aifs < EDCF_AIFSN_MAX)
acp_shm.aifs++;
@ -4175,7 +4169,7 @@ static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
}; /* ucode needs these parameters during its initialization */
const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];
for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) {
/* find out which ac this set of params applies to */
aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
@ -5172,7 +5166,7 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
if (!wlc->clk)
return;
for (ac = 0; ac < AC_COUNT; ac++)
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
wlc->wme_retries[ac]);
}
@ -5647,7 +5641,7 @@ int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl)
brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
for (ac = 0; ac < AC_COUNT; ac++) {
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
EDCF_SHORT, wlc->SRL);
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
@ -6709,7 +6703,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
qos = ieee80211_is_data_qos(h->frame_control);
/* compute length of frame in bytes for use in PLCP computations */
len = brcmu_pkttotlen(p);
len = p->len;
phylen = len + FCS_LEN;
/* Get tx_info */
@ -8358,7 +8352,7 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
/* Uninitialized; read from HW */
int ac;
for (ac = 0; ac < AC_COUNT; ac++)
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
wlc->wme_retries[ac] =
brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac));
}


@ -44,8 +44,6 @@
/* transmit buffer max headroom for protocol headers */
#define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
#define AC_COUNT 4
/* Macros for doing definition and get/set of bitfields
* Usage example, e.g. a three-bit field (bits 4-6):
* #define <NAME>_M BITFIELD_MASK(3)
@ -436,7 +434,7 @@ struct brcms_txq_info {
* bcn_li_dtim: beacon listen interval in # dtims.
* WDarmed: watchdog timer is armed.
* WDlast: last time wlc_watchdog() was called.
* edcf_txop[AC_COUNT]: current txop for each ac.
* edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
* wme_retries: per-AC retry limits.
* tx_prec_map: Precedence map based on HW FIFO space.
* fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
@ -535,9 +533,9 @@ struct brcms_c_info {
u32 WDlast;
/* WME */
u16 edcf_txop[AC_COUNT];
u16 edcf_txop[IEEE80211_NUM_ACS];
u16 wme_retries[AC_COUNT];
u16 wme_retries[IEEE80211_NUM_ACS];
u16 tx_prec_map;
u16 fifo2prec_map[NFIFO];


@ -190,15 +190,7 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
data = R_REG(&pi->regs->radioregdata);
} else {
W_REG_FLUSH(&pi->regs->phy4waddr, addr);
#ifdef __ARM_ARCH_4T__
__asm__(" .align 4 ");
__asm__(" nop ");
data = R_REG(&pi->regs->phy4wdatalo);
#else
data = R_REG(&pi->regs->phy4wdatalo);
#endif
}
pi->phy_wreg = 0;


@ -248,7 +248,6 @@ enum brcms_srom_id {
};
#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */
/* phy types */
#define PHY_TYPE_A 0 /* Phy type A */
@ -382,7 +381,6 @@ struct brcms_pub {
uint _nbands; /* # bands supported */
uint now; /* # elapsed seconds */
bool promisc; /* promiscuous destination address */
bool delayed_down; /* down delayed */
bool associated; /* true:part of [I]BSS, false: not */
/* (union of stas_associated, aps_associated) */


@ -638,7 +638,7 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
struct brcms_srom_list_head *entry;
enum brcms_srom_id id;
u16 w;
u32 val;
u32 val = 0;
const struct brcms_sromvar *srv;
uint width;
uint flags;
@ -835,6 +835,8 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
*/
return -ENODATA;
/* fixup the endianness so crc8 will pass */
cpu_to_le16_buf(buf, sz);
if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2,
CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
err = -EIO;
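/*
 * Note: crc8() runs over the raw byte stream, while the OTP words were read
 * in CPU byte order, so the buffer is normalised to little-endian before the
 * check.  A rough sketch of such a helper, under the assumption that it swaps
 * the words in place (the driver's actual cpu_to_le16_buf() may differ):
 */
static void cpu_to_le16_buf_sketch(u16 *buf, uint nwords)
{
	while (nwords--) {
		*buf = (u16)cpu_to_le16(*buf);	/* no-op on little-endian hosts */
		buf++;
	}
}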


@ -41,44 +41,20 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
/* Free the driver packet. Free the tag if present */
void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
{
struct sk_buff *nskb;
int nest = 0;
/* perversion: we use skb->next to chain multi-skb packets */
while (skb) {
nskb = skb->next;
skb->next = NULL;
if (skb->destructor)
/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
* destructor exists
*/
dev_kfree_skb_any(skb);
else
/* can free immediately (even in_irq()) if destructor
* does not exist
*/
dev_kfree_skb(skb);
nest++;
skb = nskb;
}
WARN_ON(skb->next);
if (skb->destructor)
/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
* destructor exists
*/
dev_kfree_skb_any(skb);
else
/* can free immediately (even in_irq()) if destructor
* does not exist
*/
dev_kfree_skb(skb);
}
EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
/* return total length of buffer chain */
uint brcmu_pkttotlen(struct sk_buff *p)
{
uint total;
total = 0;
for (; p; p = p->next)
total += p->len;
return total;
}
EXPORT_SYMBOL(brcmu_pkttotlen);
/*
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
@ -86,21 +62,13 @@ EXPORT_SYMBOL(brcmu_pkttotlen);
struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
struct sk_buff *p)
{
struct pktq_prec *q;
struct sk_buff_head *q;
if (pktq_full(pq) || pktq_pfull(pq, prec))
return NULL;
q = &pq->q[prec];
if (q->head)
q->tail->prev = p;
else
q->head = p;
q->tail = p;
q->len++;
q = &pq->q[prec].skblist;
skb_queue_tail(q, p);
pq->len++;
if (pq->hi_prec < prec)
@ -113,20 +81,13 @@ EXPORT_SYMBOL(brcmu_pktq_penq);
struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
struct sk_buff *p)
{
struct pktq_prec *q;
struct sk_buff_head *q;
if (pktq_full(pq) || pktq_pfull(pq, prec))
return NULL;
q = &pq->q[prec];
if (q->head == NULL)
q->tail = p;
p->prev = q->head;
q->head = p;
q->len++;
q = &pq->q[prec].skblist;
skb_queue_head(q, p);
pq->len++;
if (pq->hi_prec < prec)
@ -138,53 +99,30 @@ EXPORT_SYMBOL(brcmu_pktq_penq_head);
struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
{
struct pktq_prec *q;
struct sk_buff_head *q;
struct sk_buff *p;
q = &pq->q[prec];
p = q->head;
q = &pq->q[prec].skblist;
p = skb_dequeue(q);
if (p == NULL)
return NULL;
q->head = p->prev;
if (q->head == NULL)
q->tail = NULL;
q->len--;
pq->len--;
p->prev = NULL;
return p;
}
EXPORT_SYMBOL(brcmu_pktq_pdeq);
struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
{
struct pktq_prec *q;
struct sk_buff *p, *prev;
struct sk_buff_head *q;
struct sk_buff *p;
q = &pq->q[prec];
p = q->head;
q = &pq->q[prec].skblist;
p = skb_dequeue_tail(q);
if (p == NULL)
return NULL;
for (prev = NULL; p != q->tail; p = p->prev)
prev = p;
if (prev)
prev->prev = NULL;
else
q->head = NULL;
q->tail = prev;
q->len--;
pq->len--;
return p;
}
EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
@ -193,31 +131,17 @@ void
brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
bool (*fn)(struct sk_buff *, void *), void *arg)
{
struct pktq_prec *q;
struct sk_buff *p, *prev = NULL;
struct sk_buff_head *q;
struct sk_buff *p, *next;
q = &pq->q[prec];
p = q->head;
while (p) {
q = &pq->q[prec].skblist;
skb_queue_walk_safe(q, p, next) {
if (fn == NULL || (*fn) (p, arg)) {
bool head = (p == q->head);
if (head)
q->head = p->prev;
else
prev->prev = p->prev;
p->prev = NULL;
skb_unlink(p, q);
brcmu_pkt_buf_free_skb(p);
q->len--;
pq->len--;
p = (head ? q->head : prev->prev);
} else {
prev = p;
p = p->prev;
}
}
if (q->head == NULL)
q->tail = NULL;
}
EXPORT_SYMBOL(brcmu_pktq_pflush);
@ -242,8 +166,10 @@ void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
pq->max = (u16) max_len;
for (prec = 0; prec < num_prec; prec++)
for (prec = 0; prec < num_prec; prec++) {
pq->q[prec].max = pq->max;
skb_queue_head_init(&pq->q[prec].skblist);
}
}
EXPORT_SYMBOL(brcmu_pktq_init);
@ -255,13 +181,13 @@ struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
return NULL;
for (prec = 0; prec < pq->hi_prec; prec++)
if (pq->q[prec].head)
if (!skb_queue_empty(&pq->q[prec].skblist))
break;
if (prec_out)
*prec_out = prec;
return pq->q[prec].tail;
return skb_peek_tail(&pq->q[prec].skblist);
}
EXPORT_SYMBOL(brcmu_pktq_peek_tail);
@ -274,7 +200,7 @@ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
for (prec = 0; prec <= pq->hi_prec; prec++)
if (prec_bmp & (1 << prec))
len += pq->q[prec].len;
len += pq->q[prec].skblist.qlen;
return len;
}
@ -284,39 +210,32 @@ EXPORT_SYMBOL(brcmu_pktq_mlen);
struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
int *prec_out)
{
struct pktq_prec *q;
struct sk_buff_head *q;
struct sk_buff *p;
int prec;
if (pq->len == 0)
return NULL;
while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
while ((prec = pq->hi_prec) > 0 &&
skb_queue_empty(&pq->q[prec].skblist))
pq->hi_prec--;
while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
while ((prec_bmp & (1 << prec)) == 0 ||
skb_queue_empty(&pq->q[prec].skblist))
if (prec-- == 0)
return NULL;
q = &pq->q[prec];
p = q->head;
q = &pq->q[prec].skblist;
p = skb_dequeue(q);
if (p == NULL)
return NULL;
q->head = p->prev;
if (q->head == NULL)
q->tail = NULL;
q->len--;
pq->len--;
if (prec_out)
*prec_out = prec;
pq->len--;
p->prev = NULL;
return p;
}
EXPORT_SYMBOL(brcmu_pktq_mdeq);
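/*
 * Note on the conversions above: the hand-rolled head/tail/len bookkeeping
 * (chained through skb->prev) is replaced throughout by the kernel's
 * struct sk_buff_head from <linux/skbuff.h>, which keeps the queue length in
 * qlen and handles the empty and single-element corner cases internally.
 * A minimal sketch of the pattern:
 */
static void pktq_sketch(struct sk_buff_head *q, struct sk_buff *p)
{
	skb_queue_head_init(q);		/* was: head = tail = NULL, len = 0 */
	skb_queue_tail(q, p);		/* was: manual tail linkage, len++ */
	p = skb_dequeue(q);		/* was: pop head, fix up tail, len-- */
	if (p && skb_queue_empty(q))	/* was: q->len == 0 */
		skb_queue_head(q, p);	/* push back at the front */
}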


@ -65,9 +65,7 @@
#define ETHER_ADDR_STR_LEN 18
struct pktq_prec {
struct sk_buff *head; /* first packet to dequeue */
struct sk_buff *tail; /* last packet to dequeue */
u16 len; /* number of queued packets */
struct sk_buff_head skblist;
u16 max; /* maximum number of queued packets */
};
@ -88,32 +86,32 @@ struct pktq {
static inline int pktq_plen(struct pktq *pq, int prec)
{
return pq->q[prec].len;
return pq->q[prec].skblist.qlen;
}
static inline int pktq_pavail(struct pktq *pq, int prec)
{
return pq->q[prec].max - pq->q[prec].len;
return pq->q[prec].max - pq->q[prec].skblist.qlen;
}
static inline bool pktq_pfull(struct pktq *pq, int prec)
{
return pq->q[prec].len >= pq->q[prec].max;
return pq->q[prec].skblist.qlen >= pq->q[prec].max;
}
static inline bool pktq_pempty(struct pktq *pq, int prec)
{
return pq->q[prec].len == 0;
return skb_queue_empty(&pq->q[prec].skblist);
}
static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
{
return pq->q[prec].head;
return skb_peek(&pq->q[prec].skblist);
}
static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
{
return pq->q[prec].tail;
return skb_peek_tail(&pq->q[prec].skblist);
}
extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
@ -172,9 +170,6 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
bool (*fn)(struct sk_buff *, void *), void *arg);
/* externs */
/* packet */
extern uint brcmu_pkttotlen(struct sk_buff *p);
/* ip address */
struct ipv4_addr;


@ -3871,8 +3871,8 @@ static void prism2_get_drvinfo(struct net_device *dev,
iface = netdev_priv(dev);
local = iface->local;
strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
snprintf(info->fw_version, sizeof(info->fw_version) - 1,
strlcpy(info->driver, "hostap", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff,
local->sta_fw_ver & 0xff);
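/*
 * Note: unlike strncpy(), strlcpy() always NUL-terminates the destination,
 * and snprintf() already terminates within the size it is given, so passing
 * the full buffer size (rather than size - 1) is the correct idiom.  For
 * example, with an 8-byte buffer:
 *
 *	char buf[8];
 *	strncpy(buf, "12345678", sizeof(buf));		// no trailing '\0'
 *	strlcpy(buf, "12345678", sizeof(buf));		// buf == "1234567"
 *	snprintf(buf, sizeof(buf), "%s", "12345678");	// buf == "1234567"
 */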


@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
char fw_ver[64], ucode_ver[64];
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@ -5990,7 +5990,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
fw_ver, priv->eeprom_version, ucode_ver);
strcpy(info->bus_info, pci_name(priv->pci_dev));
strlcpy(info->bus_info, pci_name(priv->pci_dev),
sizeof(info->bus_info));
}
static u32 ipw2100_ethtool_get_link(struct net_device *dev)


@ -131,6 +131,14 @@ static struct ieee80211_rate ipw2200_rates[] = {
#define ipw2200_bg_rates (ipw2200_rates + 0)
#define ipw2200_num_bg_rates 12
/* Ugly macro to convert literal channel numbers into their MHz equivalents.
* There are certainly some conditions that will break this (like feeding it '30')
* but they shouldn't arise since nothing talks on channel 30. */
#define ieee80211chan2mhz(x) \
(((x) <= 14) ? \
(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
((x) + 1000) * 5)
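/*
 * For reference, a few sample conversions with the macro above:
 *	channel  1 -> (1 * 5) + 2407	= 2412 MHz
 *	channel 14 -> special case	= 2484 MHz
 *	channel 36 -> (36 + 1000) * 5	= 5180 MHz
 */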
#ifdef CONFIG_IPW2200_QOS
static int qos_enable = 0;
static int qos_burst_enable = 0;
@ -10540,8 +10548,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
char date[32];
u32 len;
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
len = sizeof(vers);
ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@ -10550,7 +10558,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
vers, date);
strcpy(info->bus_info, pci_name(p->pci_dev));
strlcpy(info->bus_info, pci_name(p->pci_dev),
sizeof(info->bus_info));
info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
}


@ -1,6 +1,6 @@
# WIFI
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
iwlwifi-objs := iwl-agn.o iwl-agn-rs.o
iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o
iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o


@ -270,11 +270,6 @@ struct iwl_cfg iwl2000_2bgn_cfg = {
.ht_params = &iwl2000_ht_params,
};
struct iwl_cfg iwl2000_2bg_cfg = {
.name = "2000 Series 2x2 BG",
IWL_DEVICE_2000,
};
struct iwl_cfg iwl2000_2bgn_d_cfg = {
.name = "2000D Series 2x2 BGN",
IWL_DEVICE_2000,
@ -304,11 +299,6 @@ struct iwl_cfg iwl2030_2bgn_cfg = {
.ht_params = &iwl2000_ht_params,
};
struct iwl_cfg iwl2030_2bg_cfg = {
.name = "2000 Series 2x2 BG/BT",
IWL_DEVICE_2030,
};
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
@ -326,11 +316,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
struct iwl_cfg iwl105_bg_cfg = {
.name = "105 Series 1x1 BG",
IWL_DEVICE_105,
};
struct iwl_cfg iwl105_bgn_cfg = {
.name = "105 Series 1x1 BGN",
IWL_DEVICE_105,
@ -361,11 +346,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
struct iwl_cfg iwl135_bg_cfg = {
.name = "135 Series 1x1 BG/BT",
IWL_DEVICE_135,
};
struct iwl_cfg iwl135_bgn_cfg = {
.name = "135 Series 1x1 BGN/BT",
IWL_DEVICE_135,


@ -439,16 +439,6 @@ struct iwl_cfg iwl6035_2agn_cfg = {
.ht_params = &iwl6000_ht_params,
};
struct iwl_cfg iwl6035_2abg_cfg = {
.name = "6035 Series 2x2 ABG/BT",
IWL_DEVICE_6030,
};
struct iwl_cfg iwl6035_2bg_cfg = {
.name = "6035 Series 2x2 BG/BT",
IWL_DEVICE_6030,
};
struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,


@ -827,6 +827,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
case IEEE80211_SMPS_STATIC:
case IEEE80211_SMPS_DYNAMIC:
return IWL_NUM_IDLE_CHAINS_SINGLE;
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
return active_cnt;
default:
@ -983,3 +984,360 @@ void iwlagn_remove_notification(struct iwl_priv *priv,
list_del(&wait_entry->list);
spin_unlock_bh(&priv->notif_wait_lock);
}
#ifdef CONFIG_PM_SLEEP
static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
int i;
for (i = 0; i < IWLAGN_P1K_SIZE; i++)
out[i] = cpu_to_le16(p1k[i]);
}
struct wowlan_key_data {
struct iwl_rxon_context *ctx;
struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwlagn_wowlan_tkip_params_cmd *tkip;
const u8 *bssid;
bool error, use_rsc_tsc, use_tkip;
};
static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key,
void *_data)
{
struct iwl_priv *priv = hw->priv;
struct wowlan_key_data *data = _data;
struct iwl_rxon_context *ctx = data->ctx;
struct aes_sc *aes_sc, *aes_tx_sc = NULL;
struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
struct iwlagn_p1k_cache *rx_p1ks;
u8 *rx_mic_key;
struct ieee80211_key_seq seq;
u32 cur_rx_iv32 = 0;
u16 p1k[IWLAGN_P1K_SIZE];
int ret, i;
mutex_lock(&priv->shrd->mutex);
if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
!sta && !ctx->key_mapping_keys)
ret = iwl_set_default_wep_key(priv, ctx, key);
else
ret = iwl_set_dynamic_key(priv, ctx, key, sta);
if (ret) {
IWL_ERR(priv, "Error setting key during suspend!\n");
data->error = true;
}
switch (key->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
if (sta) {
tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
rx_p1ks = data->tkip->rx_uni;
ieee80211_get_key_tx_seq(key, &seq);
tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
memcpy(data->tkip->mic_keys.tx,
&key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
IWLAGN_MIC_KEY_SIZE);
rx_mic_key = data->tkip->mic_keys.rx_unicast;
} else {
tkip_sc =
data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
rx_p1ks = data->tkip->rx_multi;
rx_mic_key = data->tkip->mic_keys.rx_mcast;
}
/*
* For non-QoS this relies on the fact that both the uCode and
* mac80211 use TID 0 (as they need to, to avoid replay attacks)
* for checking the IV in the frames.
*/
for (i = 0; i < IWLAGN_NUM_RSC; i++) {
ieee80211_get_key_rx_seq(key, i, &seq);
tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
/* wrapping isn't allowed, AP must rekey */
if (seq.tkip.iv32 > cur_rx_iv32)
cur_rx_iv32 = seq.tkip.iv32;
}
ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
ieee80211_get_tkip_rx_p1k(key, data->bssid,
cur_rx_iv32 + 1, p1k);
iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
memcpy(rx_mic_key,
&key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
IWLAGN_MIC_KEY_SIZE);
data->use_tkip = true;
data->use_rsc_tsc = true;
break;
case WLAN_CIPHER_SUITE_CCMP:
if (sta) {
u8 *pn = seq.ccmp.pn;
aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
ieee80211_get_key_tx_seq(key, &seq);
aes_tx_sc->pn = cpu_to_le64(
(u64)pn[5] |
((u64)pn[4] << 8) |
((u64)pn[3] << 16) |
((u64)pn[2] << 24) |
((u64)pn[1] << 32) |
((u64)pn[0] << 40));
} else
aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
/*
* For non-QoS this relies on the fact that both the uCode and
* mac80211 use TID 0 for checking the IV in the frames.
*/
for (i = 0; i < IWLAGN_NUM_RSC; i++) {
u8 *pn = seq.ccmp.pn;
ieee80211_get_key_rx_seq(key, i, &seq);
aes_sc->pn = cpu_to_le64(
(u64)pn[5] |
((u64)pn[4] << 8) |
((u64)pn[3] << 16) |
((u64)pn[2] << 24) |
((u64)pn[1] << 32) |
((u64)pn[0] << 40));
}
data->use_rsc_tsc = true;
break;
}
mutex_unlock(&priv->shrd->mutex);
}
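/*
 * Note on the CCMP branch above: mac80211 hands back the 48-bit packet number
 * with pn[0] as the most significant byte, and the shifts pack it into the
 * low 48 bits of a host u64 before cpu_to_le64() stores it for the uCode.
 * For example, pn[] = { 0, 0, 0, 0, 0x01, 0x02 } packs to 0x0102.
 */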
int iwlagn_send_patterns(struct iwl_priv *priv,
struct cfg80211_wowlan *wowlan)
{
struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
struct iwl_host_cmd cmd = {
.id = REPLY_WOWLAN_PATTERNS,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.flags = CMD_SYNC,
};
int i, err;
if (!wowlan->n_patterns)
return 0;
cmd.len[0] = sizeof(*pattern_cmd) +
wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
if (!pattern_cmd)
return -ENOMEM;
pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
for (i = 0; i < wowlan->n_patterns; i++) {
int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
memcpy(&pattern_cmd->patterns[i].mask,
wowlan->patterns[i].mask, mask_len);
memcpy(&pattern_cmd->patterns[i].pattern,
wowlan->patterns[i].pattern,
wowlan->patterns[i].pattern_len);
pattern_cmd->patterns[i].mask_size = mask_len;
pattern_cmd->patterns[i].pattern_size =
wowlan->patterns[i].pattern_len;
}
cmd.data[0] = pattern_cmd;
err = iwl_trans_send_cmd(trans(priv), &cmd);
kfree(pattern_cmd);
return err;
}
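/*
 * Note: cfg80211 wake-up patterns carry one mask bit per pattern byte, so the
 * mask occupies DIV_ROUND_UP(pattern_len, 8) bytes; a 20-byte pattern, for
 * example, needs a 3-byte mask.
 */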
int iwlagn_suspend(struct iwl_priv *priv,
struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
struct iwl_rxon_cmd rxon;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
struct iwlagn_d3_config_cmd d3_cfg_cmd = {};
struct wowlan_key_data key_data = {
.ctx = ctx,
.bssid = ctx->active.bssid_addr,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
};
int ret, i;
u16 seq;
key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
if (!key_data.rsc_tsc)
return -ENOMEM;
memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
/*
* We know the last used seqno, and the uCode expects to know that
* one; it will increment it before TX.
*/
seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
/*
* For QoS counters, we store the one to use next, so subtract 0x10
* since the uCode will add 0x10 before using the value.
*/
for (i = 0; i < 8; i++) {
seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
seq -= 0x10;
wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
}
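/*
 * Worked example: the low 4 bits of the sequence control field are the
 * fragment number, so one sequence-number step is 0x10.  If the driver has
 * 0x0150 stored as the next value to use (seq 21, frag 0), it hands 0x0140
 * to the uCode, which adds 0x10 and transmits with 0x0150 again.
 */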
if (wowlan->disconnect)
wakeup_filter_cmd.enabled |=
cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
if (wowlan->magic_pkt)
wakeup_filter_cmd.enabled |=
cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
if (wowlan->gtk_rekey_failure)
wakeup_filter_cmd.enabled |=
cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
if (wowlan->eap_identity_req)
wakeup_filter_cmd.enabled |=
cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
if (wowlan->four_way_handshake)
wakeup_filter_cmd.enabled |=
cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
if (wowlan->n_patterns)
wakeup_filter_cmd.enabled |=
cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
if (wowlan->rfkill_release)
d3_cfg_cmd.wakeup_flags |=
cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);
iwl_scan_cancel_timeout(priv, 200);
memcpy(&rxon, &ctx->active, sizeof(rxon));
iwl_trans_stop_device(trans(priv));
priv->shrd->wowlan = true;
ret = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
if (ret)
goto out;
/* now configure WoWLAN ucode */
ret = iwl_alive_start(priv);
if (ret)
goto out;
memcpy(&ctx->staging, &rxon, sizeof(rxon));
ret = iwlagn_commit_rxon(priv, ctx);
if (ret)
goto out;
ret = iwl_power_update_mode(priv, true);
if (ret)
goto out;
if (!iwlagn_mod_params.sw_crypto) {
/* mark all keys clear */
priv->ucode_key_table = 0;
ctx->key_mapping_keys = 0;
/*
* This needs to be unlocked due to lock ordering
* constraints. Since we're in the suspend path
* that isn't really a problem though.
*/
mutex_unlock(&priv->shrd->mutex);
ieee80211_iter_keys(priv->hw, ctx->vif,
iwlagn_wowlan_program_keys,
&key_data);
mutex_lock(&priv->shrd->mutex);
if (key_data.error) {
ret = -EIO;
goto out;
}
if (key_data.use_rsc_tsc) {
struct iwl_host_cmd rsc_tsc_cmd = {
.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
.flags = CMD_SYNC,
.data[0] = key_data.rsc_tsc,
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
.len[0] = sizeof(key_data.rsc_tsc),
};
ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
if (ret)
goto out;
}
if (key_data.use_tkip) {
ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_WOWLAN_TKIP_PARAMS,
CMD_SYNC, sizeof(tkip_cmd),
&tkip_cmd);
if (ret)
goto out;
}
if (priv->have_rekey_data) {
memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
kek_kck_cmd.replay_ctr = priv->replay_ctr;
ret = iwl_trans_send_cmd_pdu(trans(priv),
REPLY_WOWLAN_KEK_KCK_MATERIAL,
CMD_SYNC, sizeof(kek_kck_cmd),
&kek_kck_cmd);
if (ret)
goto out;
}
}
ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
sizeof(d3_cfg_cmd), &d3_cfg_cmd);
if (ret)
goto out;
ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
CMD_SYNC, sizeof(wakeup_filter_cmd),
&wakeup_filter_cmd);
if (ret)
goto out;
ret = iwlagn_send_patterns(priv, wowlan);
out:
kfree(key_data.rsc_tsc);
return ret;
}
#endif


@ -1458,10 +1458,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
valid_tx_ant =
first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@ -1636,10 +1634,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
valid_tx_ant =
first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:


@ -800,7 +800,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
ctx->active.bssid_addr))
continue;
ctx->last_tx_rejected = false;
iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
iwl_trans_wake_any_queue(trans(priv), ctx->ctxid,
"channel got active");
}
}
@ -1032,6 +1033,50 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
return 0;
}
static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb,
struct iwl_device_cmd *cmd)
{
struct iwl_wipan_noa_data *new_data, *old_data;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw;
/* no condition -- we're in softirq */
old_data = rcu_dereference_protected(priv->noa_data, true);
if (noa_notif->noa_active) {
u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
u32 copylen = len;
/* EID, len, OUI, subtype */
len += 1 + 1 + 3 + 1;
/* P2P id, P2P length */
len += 1 + 2;
copylen += 1 + 2;
new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
if (new_data) {
new_data->length = len;
new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
new_data->data[1] = len - 2; /* not counting EID, len */
new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
memcpy(&new_data->data[6], &noa_notif->noa_attribute,
copylen);
}
} else
new_data = NULL;
rcu_assign_pointer(priv->noa_data, new_data);
if (old_data)
kfree_rcu(old_data, rcu_head);
return 0;
}
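/*
 * Note: the handler above wraps the firmware's NoA attribute in a WFA P2P
 * vendor-specific IE, which iwlagn_tx_skb() later appends to probe responses.
 * The resulting layout is:
 *	data[0]    WLAN_EID_VENDOR_SPECIFIC (0xdd)
 *	data[1]    IE length (len - 2, i.e. not counting the EID/length octets)
 *	data[2..4] WLAN_OUI_WFA (50:6f:9a)
 *	data[5]    WLAN_OUI_TYPE_WFA_P2P
 *	data[6..]  the NoA attribute (ID, 2-byte length, body), copied verbatim
 */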
/**
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
@ -1055,6 +1100,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
handlers[REPLY_ADD_STA] = iwl_add_sta_callback;
handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification;
/*
* The same handler is used for both the REPLY to a discrete
* statistics request from the host as well as for the periodic


@ -45,7 +45,8 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
send->filter_flags = old_filter;
if (ret)
IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
IWL_DEBUG_QUIET_RFKILL(priv,
"Error clearing ASSOC_MSK on BSS (%d)\n", ret);
return ret;
}
@ -116,7 +117,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
if (ctx->ht.enabled)
ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
@ -124,7 +125,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
IWL_ERR(priv, "Failed to update QoS\n");
IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}
static int iwlagn_update_beacon(struct iwl_priv *priv,
@ -541,6 +542,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&priv->shrd->mutex);
if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
goto out;
if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
@ -840,7 +844,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (ctx->last_tx_rejected) {
ctx->last_tx_rejected = false;
iwl_trans_wake_any_queue(trans(priv),
ctx->ctxid);
ctx->ctxid,
"Disassoc: flush queue");
}
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;


@ -647,7 +647,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
int ret;
struct iwl_addsta_cmd sta_cmd;
struct iwl_link_quality_cmd lq;
bool active;
bool active, have_lq = false;
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
@ -657,7 +657,10 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
sta_cmd.mode = 0;
memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
if (priv->stations[sta_id].lq) {
memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
have_lq = true;
}
active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@ -679,7 +682,8 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (ret)
IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
priv->stations[sta_id].sta.sta.addr, ret);
iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
if (have_lq)
iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
}
int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
@ -825,28 +829,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return ret;
}
int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct iwl_priv *priv = hw->priv;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
int ret;
IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
"station %pM\n", sta->addr);
mutex_lock(&priv->shrd->mutex);
IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
sta->addr);
ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
if (ret)
IWL_ERR(priv, "Error removing station %pM\n",
sta->addr);
mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return ret;
}
void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
@ -1464,20 +1446,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
{
unsigned long flags;
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
priv->stations[sta_id].sta.sta.modify_mask = 0;
priv->stations[sta_id].sta.sleep_tx_count = 0;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
@ -1494,36 +1463,3 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
struct iwl_priv *priv = hw->priv;
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
int sta_id;
IWL_DEBUG_MAC80211(priv, "enter\n");
switch (cmd) {
case STA_NOTIFY_SLEEP:
WARN_ON(!sta_priv->client);
sta_priv->asleep = true;
if (atomic_read(&sta_priv->pending_frames) > 0)
ieee80211_sta_block_awake(hw, sta, true);
break;
case STA_NOTIFY_AWAKE:
WARN_ON(!sta_priv->client);
if (!sta_priv->asleep)
break;
sta_priv->asleep = false;
sta_id = iwl_sta_id(sta);
if (sta_id != IWL_INVALID_STATION)
iwl_sta_modify_ps_wake(priv, sta_id);
break;
default:
break;
}
IWL_DEBUG_MAC80211(priv, "leave\n");
}


@ -283,6 +283,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
if (unlikely(ieee80211_is_probe_resp(fc))) {
struct iwl_wipan_noa_data *noa_data =
rcu_dereference(priv->noa_data);
if (noa_data &&
pskb_expand_head(skb, 0, noa_data->length,
GFP_ATOMIC) == 0) {
memcpy(skb_put(skb, noa_data->length),
noa_data->data, noa_data->length);
hdr = (struct ieee80211_hdr *)skb->data;
}
}
hdr_len = ieee80211_hdrlen(fc);
/* For management frames use broadcast id to do not break aggregation */
@ -800,7 +813,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
iwl_is_associated_ctx(ctx) && ctx->vif &&
ctx->vif->type == NL80211_IFTYPE_STATION) {
ctx->last_tx_rejected = true;
iwl_trans_stop_queue(trans(priv), txq_id);
iwl_trans_stop_queue(trans(priv), txq_id,
"Tx on passive channel");
IWL_DEBUG_TX_REPLY(priv,
"TXQ %d status %s (0x%08x) "


@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include "iwl-dev.h"
#include "iwl-core.h"
@ -72,51 +73,98 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};
/******************************************************************************
*
* uCode download functions
*
******************************************************************************/
static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
{
if (desc->v_addr)
dma_free_coherent(bus->dev, desc->len,
desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
}
static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
{
iwl_free_fw_desc(bus, &img->code);
iwl_free_fw_desc(bus, &img->data);
}
void iwl_dealloc_ucode(struct iwl_trans *trans)
{
iwl_free_fw_img(bus(trans), &trans->ucode_rt);
iwl_free_fw_img(bus(trans), &trans->ucode_init);
iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
}
int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
const void *data, size_t len)
{
if (!len) {
desc->v_addr = NULL;
return -EINVAL;
}
desc->v_addr = dma_alloc_coherent(bus->dev, len,
&desc->p_addr, GFP_KERNEL);
if (!desc->v_addr)
return -ENOMEM;
desc->len = len;
memcpy(desc->v_addr, data, len);
return 0;
}
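/*
 * A short usage sketch of the helpers above (hypothetical caller): each
 * firmware section lives in a DMA-coherent buffer described by a fw_desc,
 * allocated once at load time and released through iwl_free_fw_desc().
 */
static int fw_desc_example(struct iwl_bus *bus, const void *data, size_t len)
{
	struct fw_desc desc = {};
	int ret = iwl_alloc_fw_desc(bus, &desc, data, len);

	if (ret)
		return ret;	/* -EINVAL for empty data, -ENOMEM on failure */
	/* desc.v_addr / desc.p_addr / desc.len are valid here ... */
	iwl_free_fw_desc(bus, &desc);
	return 0;
}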
/*
* ucode
*/
static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
static int iwlagn_load_section(struct iwl_trans *trans, const char *name,
struct fw_desc *image, u32 dst_addr)
{
struct iwl_bus *bus = bus(trans);
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
int ret;
priv->ucode_write_complete = 0;
trans->ucode_write_complete = 0;
iwl_write_direct32(bus(priv),
iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
iwl_write_direct32(bus(priv),
iwl_write_direct32(bus,
FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
iwl_write_direct32(bus(priv),
iwl_write_direct32(bus,
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
iwl_write_direct32(bus(priv),
iwl_write_direct32(bus,
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
(iwl_get_dma_hi_addr(phy_addr)
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
iwl_write_direct32(bus(priv),
iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
iwl_write_direct32(bus(priv),
iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
ret = wait_event_timeout(priv->shrd->wait_command_queue,
priv->ucode_write_complete, 5 * HZ);
IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
ret = wait_event_timeout(trans->shrd->wait_command_queue,
trans->ucode_write_complete, 5 * HZ);
if (!ret) {
IWL_ERR(priv, "Could not load the %s uCode section\n",
IWL_ERR(trans, "Could not load the %s uCode section\n",
name);
return -ETIMEDOUT;
}
@ -124,17 +172,41 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
return 0;
}
static int iwlagn_load_given_ucode(struct iwl_priv *priv,
struct fw_img *image)
static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
enum iwl_ucode_type ucode_type)
{
switch (ucode_type) {
case IWL_UCODE_INIT:
return &trans->ucode_init;
case IWL_UCODE_WOWLAN:
return &trans->ucode_wowlan;
case IWL_UCODE_REGULAR:
return &trans->ucode_rt;
case IWL_UCODE_NONE:
break;
}
return NULL;
}
static int iwlagn_load_given_ucode(struct iwl_trans *trans,
enum iwl_ucode_type ucode_type)
{
int ret = 0;
struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
ret = iwlagn_load_section(priv, "INST", &image->code,
if (!image) {
IWL_ERR(trans, "Invalid ucode requested (%d)\n",
ucode_type);
return -EINVAL;
}
ret = iwlagn_load_section(trans, "INST", &image->code,
IWLAGN_RTC_INST_LOWER_BOUND);
if (ret)
return ret;
return iwlagn_load_section(priv, "DATA", &image->data,
return iwlagn_load_section(trans, "DATA", &image->data,
IWLAGN_RTC_DATA_LOWER_BOUND);
}
@ -418,7 +490,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
static int iwl_verify_inst_sparse(struct iwl_priv *priv,
static int iwl_verify_inst_sparse(struct iwl_bus *bus,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@ -426,15 +498,15 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
u32 val;
u32 i;
IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
i + IWLAGN_RTC_INST_LOWER_BOUND);
val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image))
return -EIO;
}
@ -442,7 +514,7 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
return 0;
}
static void iwl_print_mismatch_inst(struct iwl_priv *priv,
static void iwl_print_mismatch_inst(struct iwl_bus *bus,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@ -451,18 +523,18 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
u32 offs;
int errors = 0;
IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
IWLAGN_RTC_INST_LOWER_BOUND);
for (offs = 0;
offs < len && errors < 20;
offs += sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "uCode INST section at "
IWL_ERR(bus, "uCode INST section at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
offs, val, le32_to_cpu(*image));
errors++;
@ -474,16 +546,24 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
* iwl_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
static int iwl_verify_ucode(struct iwl_trans *trans,
enum iwl_ucode_type ucode_type)
{
if (!iwl_verify_inst_sparse(priv, &img->code)) {
IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
if (!img) {
IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
return -EINVAL;
}
if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
return 0;
}
IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
iwl_print_mismatch_inst(priv, &img->code);
iwl_print_mismatch_inst(bus(trans), &img->code);
return -EIO;
}
@ -519,13 +599,12 @@ static void iwlagn_alive_fn(struct iwl_priv *priv,
#define UCODE_CALIB_TIMEOUT (2*HZ)
int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
struct fw_img *image,
enum iwlagn_ucode_type ucode_type)
enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
struct iwlagn_alive_data alive_data;
int ret;
enum iwlagn_ucode_type old_type;
enum iwl_ucode_type old_type;
ret = iwl_trans_start_device(trans(priv));
if (ret)
@ -537,7 +616,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
old_type = priv->ucode_type;
priv->ucode_type = ucode_type;
ret = iwlagn_load_given_ucode(priv, image);
ret = iwlagn_load_given_ucode(trans(priv), ucode_type);
if (ret) {
priv->ucode_type = old_type;
iwlagn_remove_notification(priv, &alive_wait);
@ -568,7 +647,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
* skip it for WoWLAN.
*/
if (ucode_type != IWL_UCODE_WOWLAN) {
ret = iwl_verify_ucode(priv, image);
ret = iwl_verify_ucode(trans(priv), ucode_type);
if (ret) {
priv->ucode_type = old_type;
return ret;
@ -597,7 +676,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
lockdep_assert_held(&priv->shrd->mutex);
/* No init ucode required? Curious, but maybe ok */
if (!priv->ucode_init.code.len)
if (!trans(priv)->ucode_init.code.len)
return 0;
if (priv->ucode_type != IWL_UCODE_NONE)
@ -608,8 +687,7 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
NULL, NULL);
/* Will also start the device */
ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
IWL_UCODE_INIT);
ret = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
if (ret)
goto error;

File diff suppressed because it is too large


@ -65,6 +65,12 @@
#include "iwl-dev.h"
struct iwlagn_ucode_capabilities {
u32 max_probe_length;
u32 standard_phy_calibration_size;
u32 flags;
};
extern struct ieee80211_ops iwlagn_hw_ops;
int iwl_reset_ict(struct iwl_trans *trans);
@ -77,6 +83,15 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
hdr->data_valid = 1;
}
void __iwl_down(struct iwl_priv *priv);
void iwl_down(struct iwl_priv *priv);
void iwlagn_prepare_restart(struct iwl_priv *priv);
/* MAC80211 */
struct ieee80211_hw *iwl_alloc_all(void);
int iwlagn_mac_setup_register(struct iwl_priv *priv,
struct iwlagn_ucode_capabilities *capa);
/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@ -95,8 +110,7 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
void iwlagn_send_prio_tbl(struct iwl_priv *priv);
int iwlagn_run_init_ucode(struct iwl_priv *priv);
int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
struct fw_img *image,
enum iwlagn_ucode_type ucode_type);
enum iwl_ucode_type ucode_type);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
@ -105,6 +119,12 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
#ifdef CONFIG_PM_SLEEP
int iwlagn_send_patterns(struct iwl_priv *priv,
struct cfg80211_wowlan *wowlan);
int iwlagn_suspend(struct iwl_priv *priv,
struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
#endif
/* rx */
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
@ -196,9 +216,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_sta *sta, u8 *sta_id_r);
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr);
int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
@ -316,10 +333,6 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
int iwl_update_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_update_bcast_stations(struct iwl_priv *priv);
void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta);
/* rate */
static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)


@ -101,17 +101,11 @@ extern struct iwl_cfg iwl100_bg_cfg;
extern struct iwl_cfg iwl130_bgn_cfg;
extern struct iwl_cfg iwl130_bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_cfg;
extern struct iwl_cfg iwl2000_2bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_d_cfg;
extern struct iwl_cfg iwl2030_2bgn_cfg;
extern struct iwl_cfg iwl2030_2bg_cfg;
extern struct iwl_cfg iwl6035_2agn_cfg;
extern struct iwl_cfg iwl6035_2abg_cfg;
extern struct iwl_cfg iwl6035_2bg_cfg;
extern struct iwl_cfg iwl105_bg_cfg;
extern struct iwl_cfg iwl105_bgn_cfg;
extern struct iwl_cfg iwl105_bgn_d_cfg;
extern struct iwl_cfg iwl135_bg_cfg;
extern struct iwl_cfg iwl135_bgn_cfg;
#endif /* __iwl_pci_h__ */


@ -1120,229 +1120,8 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
&statistics_cmd);
}
int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct iwl_priv *priv = hw->priv;
struct iwl_rxon_context *ctx;
unsigned long flags;
int q;
IWL_DEBUG_MAC80211(priv, "enter\n");
if (!iwl_is_ready_rf(priv->shrd)) {
IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
return -EIO;
}
if (queue >= AC_NUM) {
IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
return 0;
}
q = AC_NUM - 1 - queue;
spin_lock_irqsave(&priv->shrd->lock, flags);
/*
* MULTI-FIXME
* This may need to be done per interface in nl80211/cfg80211/mac80211.
*/
for_each_context(priv, ctx) {
ctx->qos_data.def_qos_parm.ac[q].cw_min =
cpu_to_le16(params->cw_min);
ctx->qos_data.def_qos_parm.ac[q].cw_max =
cpu_to_le16(params->cw_max);
ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
ctx->qos_data.def_qos_parm.ac[q].edca_txop =
cpu_to_le16((params->txop * 32));
ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
}
spin_unlock_irqrestore(&priv->shrd->lock, flags);
IWL_DEBUG_MAC80211(priv, "leave\n");
return 0;
}
int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
return priv->ibss_manager == IWL_IBSS_MANAGER;
}
static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
iwl_connection_init_rx_config(priv, ctx);
iwlagn_set_rxon_chain(priv, ctx);
return iwlagn_commit_rxon(priv, ctx);
}
static int iwl_setup_interface(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
struct ieee80211_vif *vif = ctx->vif;
int err;
lockdep_assert_held(&priv->shrd->mutex);
/*
* This variable will be correct only when there's just
* a single context, but all code using it is for hardware
* that supports only one context.
*/
priv->iw_mode = vif->type;
ctx->is_active = true;
err = iwl_set_mode(priv, ctx);
if (err) {
if (!ctx->always_active)
ctx->is_active = false;
return err;
}
if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
vif->type == NL80211_IFTYPE_ADHOC) {
/*
* pretend to have high BT traffic as long as we
* are operating in IBSS mode, as this will cause
* the rate scaling etc. to behave as intended.
*/
priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
}
return 0;
}
int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
struct iwl_rxon_context *tmp, *ctx = NULL;
int err;
enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
viftype, vif->addr);
cancel_delayed_work_sync(&priv->hw_roc_disable_work);
mutex_lock(&priv->shrd->mutex);
iwlagn_disable_roc(priv);
if (!iwl_is_ready_rf(priv->shrd)) {
IWL_WARN(priv, "Try to add interface when device not ready\n");
err = -EINVAL;
goto out;
}
for_each_context(priv, tmp) {
u32 possible_modes =
tmp->interface_modes | tmp->exclusive_interface_modes;
if (tmp->vif) {
/* check if this busy context is exclusive */
if (tmp->exclusive_interface_modes &
BIT(tmp->vif->type)) {
err = -EINVAL;
goto out;
}
continue;
}
if (!(possible_modes & BIT(viftype)))
continue;
/* have maybe usable context w/o interface */
ctx = tmp;
break;
}
if (!ctx) {
err = -EOPNOTSUPP;
goto out;
}
vif_priv->ctx = ctx;
ctx->vif = vif;
err = iwl_setup_interface(priv, ctx);
if (!err)
goto out;
ctx->vif = NULL;
priv->iw_mode = NL80211_IFTYPE_STATION;
out:
mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
}
static void iwl_teardown_interface(struct iwl_priv *priv,
struct ieee80211_vif *vif,
bool mode_change)
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
lockdep_assert_held(&priv->shrd->mutex);
if (priv->scan_vif == vif) {
iwl_scan_cancel_timeout(priv, 200);
iwl_force_scan_end(priv);
}
if (!mode_change) {
iwl_set_mode(priv, ctx);
if (!ctx->always_active)
ctx->is_active = false;
}
/*
* When removing the IBSS interface, overwrite the
* BT traffic load with the stored one from the last
* notification, if any. If this is a device that
* doesn't implement this, this has no effect since
* both values are the same and zero.
*/
if (vif->type == NL80211_IFTYPE_ADHOC)
priv->bt_traffic_load = priv->last_bt_traffic_load;
}
void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
IWL_DEBUG_MAC80211(priv, "enter\n");
mutex_lock(&priv->shrd->mutex);
if (WARN_ON(ctx->vif != vif)) {
struct iwl_rxon_context *tmp;
IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
for_each_context(priv, tmp)
IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
tmp->ctxid, tmp, tmp->vif);
}
ctx->vif = NULL;
iwl_teardown_interface(priv, vif, false);
mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@ -1649,97 +1428,13 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
return 0;
}
int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p)
{
struct iwl_priv *priv = hw->priv;
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl_rxon_context *tmp;
enum nl80211_iftype newviftype = newtype;
u32 interface_modes;
int err;
IWL_DEBUG_MAC80211(priv, "enter\n");
newtype = ieee80211_iftype_p2p(newtype, newp2p);
mutex_lock(&priv->shrd->mutex);
if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
/*
* Huh? But wait ... this can maybe happen when
* we're in the middle of a firmware restart!
*/
err = -EBUSY;
goto out;
}
interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
if (!(interface_modes & BIT(newtype))) {
err = -EBUSY;
goto out;
}
/*
* Refuse a change that should be done by moving from the PAN
* context to the BSS context instead, if the BSS context is
* available and can support the new interface type.
*/
if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
(bss_ctx->interface_modes & BIT(newtype) ||
bss_ctx->exclusive_interface_modes & BIT(newtype))) {
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
err = -EBUSY;
goto out;
}
if (ctx->exclusive_interface_modes & BIT(newtype)) {
for_each_context(priv, tmp) {
if (ctx == tmp)
continue;
if (!tmp->vif)
continue;
/*
* The current mode switch would be exclusive, but
* another context is active ... refuse the switch.
*/
err = -EBUSY;
goto out;
}
}
/* success */
iwl_teardown_interface(priv, vif, true);
vif->type = newviftype;
vif->p2p = newp2p;
err = iwl_setup_interface(priv, ctx);
WARN_ON(err);
/*
* We've switched internally, but submitting to the
* device may have failed for some reason. Mask this
* error, because otherwise mac80211 will not switch
* (and set the interface type back) and we'll be
* out of sync with it.
*/
err = 0;
out:
mutex_unlock(&priv->shrd->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
return err;
}
int iwl_cmd_echo_test(struct iwl_priv *priv)
{
int ret;
struct iwl_host_cmd cmd = {
.id = REPLY_ECHO,
.len = { 0 },
.flags = CMD_SYNC,
};


@ -237,10 +237,6 @@ struct iwl_cfg {
* L i b *
***************************/
int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, u16 queue,
const struct ieee80211_tx_queue_params *params);
int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@ -260,13 +256,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype newtype, bool newp2p);
int iwl_cmd_echo_test(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
@ -323,9 +312,6 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
void iwl_force_scan_end(struct iwl_priv *priv);
int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,

View File

@ -70,10 +70,25 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
do { \
if (!iwl_is_rfkill(p->shrd)) \
dev_printk(KERN_ERR, bus(p)->dev, "%c %s " fmt, \
(in_interrupt() ? 'I' : 'U'), __func__ , ##args); \
else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
dev_printk(KERN_ERR, bus(p)->dev, "(RFKILL) %c %s " fmt, \
(in_interrupt() ? 'I' : 'U'), __func__ , ##args); \
} while (0)
#else
#define IWL_DEBUG(m, level, fmt, args...)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
#define iwl_print_hex_dump(m, level, p, len)
#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
do { \
if (!iwl_is_rfkill(p->shrd)) \
IWL_ERR(p, fmt, ##args); \
} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@ -151,7 +166,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DL_11H (1 << 28)
#define IWL_DL_STATS (1 << 29)
#define IWL_DL_TX_REPLY (1 << 30)
#define IWL_DL_QOS (1 << 31)
#define IWL_DL_TX_QUEUES (1 << 31)
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
@ -188,7 +203,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)

View File

@ -236,9 +236,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
priv->dbgfs_sram_offset = 0x800000;
if (priv->ucode_type == IWL_UCODE_INIT)
priv->dbgfs_sram_len = priv->ucode_init.data.len;
priv->dbgfs_sram_len = trans(priv)->ucode_init.data.len;
else
priv->dbgfs_sram_len = priv->ucode_rt.data.len;
priv->dbgfs_sram_len = trans(priv)->ucode_rt.data.len;
}
len = priv->dbgfs_sram_len;
@ -341,7 +341,7 @@ static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos,
priv->wowlan_sram,
priv->ucode_wowlan.data.len);
trans(priv)->ucode_wowlan.data.len);
}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@ -430,7 +430,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
"version: 0x%x\n",
(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
(trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM", eeprom_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);

View File

@ -230,17 +230,6 @@ struct iwl_vif_priv {
u8 ibss_bssid_sta_id;
};
/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
void *v_addr; /* access by driver */
dma_addr_t p_addr; /* access by card's busmaster DMA */
u32 len; /* bytes */
};
struct fw_img {
struct fw_desc code, data;
};
/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
@ -805,13 +794,6 @@ enum iwl_scan_type {
IWL_SCAN_ROC,
};
enum iwlagn_ucode_type {
IWL_UCODE_NONE,
IWL_UCODE_REGULAR,
IWL_UCODE_INIT,
IWL_UCODE_WOWLAN,
};
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
struct iwl_testmode_trace {
u32 buff_size;
@ -824,6 +806,12 @@ struct iwl_testmode_trace {
};
#endif
struct iwl_wipan_noa_data {
struct rcu_head rcu_head;
u32 length;
u8 data[];
};
struct iwl_priv {
/*data shared among all the driver's layers */
@ -883,6 +871,8 @@ struct iwl_priv {
/* init calibration results */
struct iwl_calib_result calib_results[IWL_CALIB_MAX];
struct iwl_wipan_noa_data __rcu *noa_data;
/* Scan related variables */
unsigned long scan_start;
unsigned long scan_start_tsf;
@ -907,12 +897,7 @@ struct iwl_priv {
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
struct fw_img ucode_rt;
struct fw_img ucode_init;
struct fw_img ucode_wowlan;
enum iwlagn_ucode_type ucode_type;
u8 ucode_write_complete; /* the image write is complete */
enum iwl_ucode_type ucode_type;
char firmware_name[25];
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
@ -959,7 +944,6 @@ struct iwl_priv {
/* eeprom -- this is in the card's little endian byte order */
u8 *eeprom;
int nvm_device_type;
struct iwl_eeprom_calib_info *calib_info;
enum nl80211_iftype iw_mode;

View File

@ -149,23 +149,23 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
{
u16 count;
int ret;
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
IWL_DEBUG_EEPROM(priv,
IWL_DEBUG_EEPROM(bus,
"Acquired semaphore after %d tries.\n",
count+1);
return ret;
@ -175,39 +175,39 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
return ret;
}
static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
{
iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
{
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
switch (gp) {
case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
gp);
ret = -ENOENT;
}
break;
case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
ret = -ENOENT;
}
break;
case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
default:
IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, "
"EEPROM_GP=0x%08x\n",
(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
(trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM", gp);
ret = -ENOENT;
break;
@ -302,19 +302,19 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
*
******************************************************************************/
static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
{
iwl_read32(bus(priv), CSR_OTP_GP_REG);
iwl_read32(bus, CSR_OTP_GP_REG);
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
iwl_clear_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
else
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
{
u32 otpgp;
int nvm_type;
@ -322,7 +322,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
/* OTP only valid for CP/PP and after */
switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
case CSR_HW_REV_TYPE_NONE:
IWL_ERR(priv, "Unknown hardware type\n");
IWL_ERR(bus, "Unknown hardware type\n");
return -ENOENT;
case CSR_HW_REV_TYPE_5300:
case CSR_HW_REV_TYPE_5350:
@ -331,7 +331,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
nvm_type = NVM_DEVICE_TYPE_EEPROM;
break;
default:
otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
nvm_type = NVM_DEVICE_TYPE_OTP;
else
@ -341,73 +341,73 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
return nvm_type;
}
static int iwl_init_otp_access(struct iwl_priv *priv)
static int iwl_init_otp_access(struct iwl_bus *bus)
{
int ret;
/* Enable 40MHz radio clock */
iwl_write32(bus(priv), CSR_GP_CNTRL,
iwl_read32(bus(priv), CSR_GP_CNTRL) |
iwl_write32(bus, CSR_GP_CNTRL,
iwl_read32(bus, CSR_GP_CNTRL) |
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* wait for clock to be ready */
ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (ret < 0)
IWL_ERR(priv, "Time out access OTP\n");
IWL_ERR(bus, "Time out access OTP\n");
else {
iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
/*
* CSR auto clock gate disable bit -
* this is only applicable for HW with OTP shadow RAM
*/
if (priv->cfg->base_params->shadow_ram_support)
iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
if (priv(bus)->cfg->base_params->shadow_ram_support)
iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
return ret;
}
static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
{
int ret = 0;
u32 r;
u32 otpgp;
iwl_write32(bus(priv), CSR_EEPROM_REG,
iwl_write32(bus, CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
if (ret < 0) {
IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
return ret;
}
r = iwl_read32(bus(priv), CSR_EEPROM_REG);
r = iwl_read32(bus, CSR_EEPROM_REG);
/* check for ECC errors: */
otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
/* set the uncorrectable OTP ECC bit for acknowledgement */
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
return -EINVAL;
}
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
/* set the correctable OTP ECC bit for acknowledgement */
iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
}
*eeprom_data = cpu_to_le16(r >> 16);
return 0;
@ -416,20 +416,20 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
/*
* iwl_is_otp_empty: check for empty OTP
*/
static bool iwl_is_otp_empty(struct iwl_priv *priv)
static bool iwl_is_otp_empty(struct iwl_bus *bus)
{
u16 next_link_addr = 0;
__le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) {
if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) {
if (!link_value) {
IWL_ERR(priv, "OTP is empty\n");
IWL_ERR(bus, "OTP is empty\n");
is_empty = true;
}
} else {
IWL_ERR(priv, "Unable to read first block of OTP list.\n");
IWL_ERR(bus, "Unable to read first block of OTP list.\n");
is_empty = true;
}
@ -446,7 +446,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
* we should read and use to configure the device.
* only perform this operation if shadow RAM is disabled
*/
static int iwl_find_otp_image(struct iwl_priv *priv,
static int iwl_find_otp_image(struct iwl_bus *bus,
u16 *validblockaddr)
{
u16 next_link_addr = 0, valid_addr;
@ -454,10 +454,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE);
iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE);
/* checking for empty OTP or error */
if (iwl_is_otp_empty(priv))
if (iwl_is_otp_empty(bus))
return -EINVAL;
/*
@ -471,9 +471,9 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
valid_addr = next_link_addr;
next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
if (iwl_read_otp_word(priv, next_link_addr, &link_value))
if (iwl_read_otp_word(bus, next_link_addr, &link_value))
return -EINVAL;
if (!link_value) {
/*
@ -488,10 +488,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
}
/* more in the link list, continue */
usedblocks++;
} while (usedblocks <= priv->cfg->base_params->max_ll_items);
} while (usedblocks <= priv(bus)->cfg->base_params->max_ll_items);
/* OTP has no valid blocks */
IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n");
return -EINVAL;
}
@ -504,28 +504,28 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* iwl_get_max_txpower_avg - get the highest tx power from all chains.
* find the highest tx power from all chains for the channel
*/
static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg,
struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
int element, s8 *max_txpower_in_half_dbm)
{
s8 max_txpower_avg = 0; /* (dBm) */
/* Take the highest tx power from any valid chains */
if ((priv->cfg->valid_tx_ant & ANT_A) &&
if ((cfg->valid_tx_ant & ANT_A) &&
(enhanced_txpower[element].chain_a_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_a_max;
if ((priv->cfg->valid_tx_ant & ANT_B) &&
if ((cfg->valid_tx_ant & ANT_B) &&
(enhanced_txpower[element].chain_b_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_b_max;
if ((priv->cfg->valid_tx_ant & ANT_C) &&
if ((cfg->valid_tx_ant & ANT_C) &&
(enhanced_txpower[element].chain_c_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_c_max;
if (((priv->cfg->valid_tx_ant == ANT_AB) |
(priv->cfg->valid_tx_ant == ANT_BC) |
(priv->cfg->valid_tx_ant == ANT_AC)) &&
if (((cfg->valid_tx_ant == ANT_AB) |
(cfg->valid_tx_ant == ANT_BC) |
(cfg->valid_tx_ant == ANT_AC)) &&
(enhanced_txpower[element].mimo2_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo2_max;
if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
if ((cfg->valid_tx_ant == ANT_ABC) &&
(enhanced_txpower[element].mimo3_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo3_max;
@ -627,7 +627,7 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
((txp->delta_20_in_40 & 0xf0) >> 4),
(txp->delta_20_in_40 & 0x0f));
max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
&max_txp_avg_halfdbm);
/*
@ -660,8 +660,8 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
u16 validblockaddr = 0;
u16 cache_addr = 0;
priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
if (priv->nvm_device_type == -ENOENT)
trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev);
if (trans(priv)->nvm_device_type == -ENOENT)
return -ENOENT;
/* allocate eeprom */
sz = priv->cfg->base_params->eeprom_size;
@ -675,7 +675,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
iwl_apm_init(priv);
ret = iwl_eeprom_verify_signature(priv);
ret = iwl_eeprom_verify_signature(trans(priv));
if (ret < 0) {
IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
@ -683,16 +683,16 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
/* Make sure driver (instead of uCode) is allowed to read EEPROM */
ret = iwl_eeprom_acquire_semaphore(priv);
ret = iwl_eeprom_acquire_semaphore(bus(priv));
if (ret < 0) {
IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
ret = -ENOENT;
goto err;
}
if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
ret = iwl_init_otp_access(priv);
ret = iwl_init_otp_access(bus(priv));
if (ret) {
IWL_ERR(priv, "Failed to initialize OTP access.\n");
ret = -ENOENT;
@ -707,7 +707,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
if (!priv->cfg->base_params->shadow_ram_support) {
if (iwl_find_otp_image(priv, &validblockaddr)) {
if (iwl_find_otp_image(bus(priv), &validblockaddr)) {
ret = -ENOENT;
goto done;
}
@ -716,7 +716,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
addr += sizeof(u16)) {
__le16 eeprom_data;
ret = iwl_read_otp_word(priv, addr, &eeprom_data);
ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data);
if (ret)
goto done;
e[cache_addr / 2] = eeprom_data;
@ -744,13 +744,13 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
(trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM",
iwl_eeprom_query16(priv, EEPROM_VERSION));
ret = 0;
done:
iwl_eeprom_release_semaphore(priv);
iwl_eeprom_release_semaphore(bus(priv));
err:
if (ret)

File diff suppressed because it is too large

View File

@ -255,6 +255,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */
{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */
/* 6x30 Series */
{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@ -324,46 +326,28 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
/* 2x30 Series */
{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
{IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
{IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
/* 6x35 Series */
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
/* 105 Series */
{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
/* 135 Series */
{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
{IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
{0}
};

View File

@ -416,6 +416,8 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
if (!iwl_is_associated_ctx(ctx))
continue;
if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P)
continue;
value = ctx->beacon_int;
if (!value)
value = IWL_PASSIVE_DWELL_BASE;
@ -939,51 +941,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
return 0;
}
int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct iwl_priv *priv = hw->priv;
int ret;
IWL_DEBUG_MAC80211(priv, "enter\n");
if (req->n_channels == 0)
return -EINVAL;
mutex_lock(&priv->shrd->mutex);
/*
* If an internal scan is in progress, just set
* up the scan_request as per above.
*/
if (priv->scan_type != IWL_SCAN_NORMAL) {
IWL_DEBUG_SCAN(priv,
"SCAN request during internal scan - defer\n");
priv->scan_request = req;
priv->scan_vif = vif;
ret = 0;
} else {
priv->scan_request = req;
priv->scan_vif = vif;
/*
* mac80211 will only ask for one band at a time
* so using channels[0] here is ok
*/
ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
req->channels[0]->band);
if (ret) {
priv->scan_request = NULL;
priv->scan_vif = NULL;
}
}
IWL_DEBUG_MAC80211(priv, "leave\n");
mutex_unlock(&priv->shrd->mutex);
return ret;
}
/*
* internal short scan, this function should only be called while associated.

View File

@ -396,8 +396,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
break;
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
IWL_UCODE_INIT);
status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
if (status)
IWL_DEBUG_INFO(priv,
"Error loading init ucode: %d\n", status);
@ -409,9 +408,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
status = iwlagn_load_ucode_wait_alive(priv,
&priv->ucode_rt,
IWL_UCODE_REGULAR);
status = iwlagn_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
if (status) {
IWL_DEBUG_INFO(priv,
"Error loading runtime ucode: %d\n", status);

View File

@ -355,7 +355,7 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_tx_queue *txq)
struct iwl_tx_queue *txq, const char *msg)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@ -363,13 +363,22 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
iwl_wake_sw_queue(priv(trans), ac);
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
hwq, ac, msg);
} else {
IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
" stop count %d. %s",
hwq, ac, atomic_read(&trans_pcie->
queue_stop_count[ac]), msg);
}
}
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_tx_queue *txq)
struct iwl_tx_queue *txq, const char *msg)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@ -377,9 +386,23 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
iwl_stop_sw_queue(priv(trans), ac);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
" stop count %d. %s",
hwq, ac, atomic_read(&trans_pcie->
queue_stop_count[ac]), msg);
} else {
IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
" stop count %d. %s",
hwq, ac, atomic_read(&trans_pcie->
queue_stop_count[ac]), msg);
}
} else {
IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s",
hwq, msg);
}
}
#ifdef ieee80211_stop_queue

View File

@ -1108,7 +1108,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
isr_stats->tx++;
handled |= CSR_INT_BIT_FH_TX;
/* Wake up uCode load routine, now that load is complete */
priv(trans)->ucode_write_complete = 1;
trans->ucode_write_complete = 1;
wake_up(&trans->shrd->wait_command_queue);
}

View File

@ -430,7 +430,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
txq->sched_retry = scd_retry;
IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
@ -561,12 +561,13 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
tid_data = &trans->shrd->tid_data[sta_id][tid];
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_HT(trans, "HW queue is empty\n");
IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n");
tid_data->agg.state = IWL_AGG_ON;
iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
} else {
IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
"queue\n", tid_data->tfds_in_queue);
IWL_DEBUG_TX_QUEUES(trans,
"HW queue is NOT empty: %d packets in HW"
" queue\n", tid_data->tfds_in_queue);
tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
}
spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
@ -643,14 +644,15 @@ int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
/* The queue is not empty */
if (write_ptr != read_ptr) {
IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
IWL_DEBUG_TX_QUEUES(trans,
"Stopping a non empty AGG HW QUEUE\n");
trans->shrd->tid_data[sta_id][tid].agg.state =
IWL_EMPTYING_HW_QUEUE_DELBA;
spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
return 0;
}
IWL_DEBUG_HT(trans, "HW queue is empty\n");
IWL_DEBUG_TX_QUEUES(trans, "HW queue is empty\n");
turn_off:
trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
@ -982,7 +984,8 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
ret = iwl_enqueue_hcmd(trans, cmd);
if (ret < 0) {
IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
IWL_DEBUG_QUIET_RFKILL(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
@ -1000,6 +1003,20 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
return -EBUSY;
if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
get_cmd_string(cmd->id));
return -ECANCELED;
}
if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
IWL_ERR(trans, "Command %s failed: FW Error\n",
get_cmd_string(cmd->id));
return -EIO;
}
set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
@ -1008,7 +1025,8 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
IWL_DEBUG_QUIET_RFKILL(trans,
"Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
@ -1022,12 +1040,12 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
&trans_pcie->txq[trans->shrd->cmd_queue];
struct iwl_queue *q = &txq->q;
IWL_ERR(trans,
IWL_DEBUG_QUIET_RFKILL(trans,
"Error sending %s: time out after %dms.\n",
get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
IWL_ERR(trans,
IWL_DEBUG_QUIET_RFKILL(trans,
"Current CMD queue read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
@ -1039,18 +1057,6 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
}
if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
get_cmd_string(cmd->id));
ret = -ECANCELED;
goto fail;
}
if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
IWL_ERR(trans, "Command %s failed: FW Error\n",
get_cmd_string(cmd->id));
ret = -EIO;
goto fail;
}
if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
get_cmd_string(cmd->id));
@ -1071,7 +1077,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
fail:
if (cmd->reply_page) {
iwl_free_pages(trans->shrd, cmd->reply_page);
cmd->reply_page = 0;

View File

@ -1231,7 +1231,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->need_update = 1;
iwl_txq_update_write_ptr(trans, txq);
} else {
iwl_stop_queue(trans, txq);
iwl_stop_queue(trans, txq, "Queue is full");
}
}
return 0;
@ -1283,20 +1283,21 @@ static int iwlagn_txq_check_empty(struct iwl_trans *trans,
/* aggregated HW queue */
if ((txq_id == tid_data->agg.txq_id) &&
(q->read_ptr == q->write_ptr)) {
IWL_DEBUG_HT(trans,
IWL_DEBUG_TX_QUEUES(trans,
"HW queue empty: continue DELBA flow\n");
iwl_trans_pcie_txq_agg_disable(trans, txq_id);
tid_data->agg.state = IWL_AGG_OFF;
iwl_stop_tx_ba_trans_ready(priv(trans),
NUM_IWL_RXON_CTX,
sta_id, tid);
iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
iwl_wake_queue(trans, &trans_pcie->txq[txq_id],
"DELBA flow complete");
}
break;
case IWL_EMPTYING_HW_QUEUE_ADDBA:
/* We are reclaiming the last packet of the queue */
if (tid_data->tfds_in_queue == 0) {
IWL_DEBUG_HT(trans,
IWL_DEBUG_TX_QUEUES(trans,
"HW queue empty: continue ADDBA flow\n");
tid_data->agg.state = IWL_AGG_ON;
iwl_start_tx_ba_trans_ready(priv(trans),
@ -1354,7 +1355,7 @@ static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
ssn , tfd_num, txq_id, txq->swq_id);
freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
iwl_wake_queue(trans, txq);
iwl_wake_queue(trans, txq, "Packets reclaimed");
}
iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
@ -1418,7 +1419,8 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
#endif /* CONFIG_PM_SLEEP */
static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx)
enum iwl_rxon_context_id ctx,
const char *msg)
{
u8 ac, txq_id;
struct iwl_trans_pcie *trans_pcie =
@ -1426,11 +1428,11 @@ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
for (ac = 0; ac < AC_NUM; ac++) {
txq_id = trans_pcie->ac_to_queue[ctx][ac];
IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
ac,
(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
? "stopped" : "awake");
iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
}
}
@ -1453,11 +1455,12 @@ static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
return iwl_trans;
}
static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
const char *msg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
}
#define IWL_FLUSH_WAIT_MS 2000

View File

@ -171,7 +171,8 @@ struct iwl_trans_ops {
void (*tx_start)(struct iwl_trans *trans);
void (*wake_any_queue)(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx);
enum iwl_rxon_context_id ctx,
const char *msg);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
@ -196,7 +197,7 @@ struct iwl_trans_ops {
void (*free)(struct iwl_trans *trans);
void (*stop_queue)(struct iwl_trans *trans, int q);
void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*check_stuck_queue)(struct iwl_trans *trans, int q);
@ -207,17 +208,48 @@ struct iwl_trans_ops {
#endif
};
/* one for each uCode image (inst/data, boot/init/runtime) */
struct fw_desc {
dma_addr_t p_addr; /* hardware address */
void *v_addr; /* software address */
u32 len; /* size in bytes */
};
struct fw_img {
struct fw_desc code; /* firmware code image */
struct fw_desc data; /* firmware data image */
};
enum iwl_ucode_type {
IWL_UCODE_NONE,
IWL_UCODE_REGULAR,
IWL_UCODE_INIT,
IWL_UCODE_WOWLAN,
};
/**
* struct iwl_trans - transport common data
* @ops - pointer to iwl_trans_ops
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
* @hcmd_lock: protects HCMD
* @ucode_write_complete: indicates that the ucode has been copied.
* @ucode_rt: run time ucode image
* @ucode_init: init ucode image
* @ucode_wowlan: wake on wireless ucode image (optional)
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_shared *shrd;
spinlock_t hcmd_lock;
u8 ucode_write_complete; /* the image write is complete */
struct fw_img ucode_rt;
struct fw_img ucode_init;
struct fw_img ucode_wowlan;
/* eeprom related variables */
int nvm_device_type;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
@ -249,9 +281,10 @@ static inline void iwl_trans_tx_start(struct iwl_trans *trans)
}
static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx)
enum iwl_rxon_context_id ctx,
const char *msg)
{
trans->ops->wake_any_queue(trans, ctx);
trans->ops->wake_any_queue(trans, ctx, msg);
}
@ -311,9 +344,10 @@ static inline void iwl_trans_free(struct iwl_trans *trans)
trans->ops->free(trans);
}
static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
const char *msg)
{
trans->ops->stop_queue(trans, q);
trans->ops->stop_queue(trans, q, msg);
}
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@ -348,4 +382,8 @@ static inline int iwl_trans_resume(struct iwl_trans *trans)
******************************************************/
extern const struct iwl_trans_ops trans_ops_pcie;
int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
const void *data, size_t len);
void iwl_dealloc_ucode(struct iwl_trans *trans);
#endif /* __iwl_trans_h__ */
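As a rough illustration of how the relocated firmware descriptors and the two helpers above fit together: the snippet below is only a sketch; "bus", "rt_code" and "rt_code_len" are placeholder names for the bus handle and one parsed firmware section, not identifiers from this patch.
	/* Sketch: copy one parsed firmware section into the runtime image
	 * that now lives in struct iwl_trans. */
	int ret = iwl_alloc_fw_desc(bus, &trans->ucode_rt.code,
				    rt_code, rt_code_len);
	if (ret)
		return ret;
	/* Later, on teardown, release every image allocated this way. */
	iwl_dealloc_ucode(trans);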

View File

@ -485,6 +485,7 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
struct cmd_header *resp)
{
struct cfg80211_bss *bss;
struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
int bsssize;
const u8 *pos;
@ -632,12 +633,14 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
LBS_SCAN_RSSI_TO_MBM(rssi)/100);
if (channel &&
!(channel->flags & IEEE80211_CHAN_DISABLED))
cfg80211_inform_bss(wiphy, channel,
!(channel->flags & IEEE80211_CHAN_DISABLED)) {
bss = cfg80211_inform_bss(wiphy, channel,
bssid, le64_to_cpu(*(__le64 *)tsfdesc),
capa, intvl, ie, ielen,
LBS_SCAN_RSSI_TO_MBM(rssi),
GFP_KERNEL);
cfg80211_put_bss(bss);
}
} else
lbs_deb_scan("scan response: missing BSS channel IE\n");
@ -1720,6 +1723,7 @@ static void lbs_join_post(struct lbs_private *priv,
2 + 2 + /* atim */
2 + 8]; /* extended rates */
u8 *fake = fake_ie;
struct cfg80211_bss *bss;
lbs_deb_enter(LBS_DEB_CFG80211);
@ -1763,14 +1767,15 @@ static void lbs_join_post(struct lbs_private *priv,
*fake++ = 0x6c;
lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
cfg80211_inform_bss(priv->wdev->wiphy,
params->channel,
bssid,
0,
capability,
params->beacon_interval,
fake_ie, fake - fake_ie,
0, GFP_KERNEL);
bss = cfg80211_inform_bss(priv->wdev->wiphy,
params->channel,
bssid,
0,
capability,
params->beacon_interval,
fake_ie, fake - fake_ie,
0, GFP_KERNEL);
cfg80211_put_bss(bss);
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;

View File

@ -13,13 +13,14 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
{
struct lbs_private *priv = dev->ml_priv;
snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.p%u",
priv->fwrelease >> 24 & 0xff,
priv->fwrelease >> 16 & 0xff,
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff);
strcpy(info->driver, "libertas");
strcpy(info->version, lbs_driver_version);
strlcpy(info->driver, "libertas", sizeof(info->driver));
strlcpy(info->version, lbs_driver_version, sizeof(info->version));
}
/*

View File

@ -33,7 +33,7 @@
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
static int
static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl
*rx_reor_tbl_ptr, int start_win)
@ -71,8 +71,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
rx_reor_tbl_ptr->start_win = start_win;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
return 0;
}
/*
@ -83,7 +81,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
static int
static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
{
@ -119,7 +117,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
&(MAX_TID_VALUE - 1);
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
return 0;
}
/*
@ -405,7 +402,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
u8 *ta, u8 pkt_type, void *payload)
{
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
int start_win, end_win, win_size, ret;
int start_win, end_win, win_size;
u16 pkt_index;
rx_reor_tbl_ptr =
@ -452,11 +449,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
start_win = (end_win - win_size) + 1;
else
start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
mwifiex_11n_dispatch_pkt_until_start_win(priv,
rx_reor_tbl_ptr, start_win);
if (ret)
return ret;
}
if (pkt_type != PKT_TYPE_BAR) {
@ -475,9 +469,9 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
* Dispatch all packets sequentially from start_win until a
* hole is found and adjust the start_win appropriately
*/
ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
return ret;
return 0;
}
/*

View File

@ -10,12 +10,12 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
tristate "Marvell WiFi-Ex Driver for SD8787"
tristate "Marvell WiFi-Ex Driver for SD8787/SD8797"
depends on MWIFIEX && MMC
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
8787 chipset with SDIO interface.
8787/8797 chipsets with SDIO interface.
If you choose to build it as a module, it will be called
mwifiex_sdio.

View File

@ -780,6 +780,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
{
struct ieee80211_channel *chan;
struct mwifiex_bss_info bss_info;
struct cfg80211_bss *bss;
int ie_len;
u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
enum ieee80211_band band;
@ -800,9 +801,10 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
ieee80211_channel_to_frequency(bss_info.bss_chan,
band));
cfg80211_inform_bss(priv->wdev->wiphy, chan,
bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
0, ie_buf, ie_len, 0, GFP_KERNEL);
cfg80211_put_bss(bss);
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
return 0;

View File

@ -75,18 +75,32 @@ static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
* This function maps an index in supported rates table into
* the corresponding data rate.
*/
u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
u8 ht_info)
{
u16 mcs_rate[4][8] = {
{0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e}
, /* LG 40M */
{0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c}
, /* SG 40M */
{0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82}
, /* LG 20M */
{0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90}
}; /* SG 20M */
/*
* For every mcs_rate line, the first 8 bytes are for stream 1x1,
* and all 16 bytes are for stream 2x2.
*/
u16 mcs_rate[4][16] = {
/* LGI 40M */
{ 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
/* SGI 40M */
{ 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
/* LGI 20M */
{ 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
/* SGI 20M */
{ 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
};
u32 mcs_num_supp =
(priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
if (ht_info & BIT(0)) {
@ -95,7 +109,7 @@ u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
rate = 0x0D; /* MCS 32 SGI rate */
else
rate = 0x0C; /* MCS 32 LGI rate */
} else if (index < 8) {
} else if (index < mcs_num_supp) {
if (ht_info & BIT(1)) {
if (ht_info & BIT(2))
/* SGI, 40M */
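The widened table above covers the 2x2 (MCS 8-15) rates in addition to the 1x1 ones; the entries look like data rates expressed in 500 kbps units (for instance, 0x0d in the "LGI 20M" row is 13, i.e. the 6.5 Mbps MCS 0 rate, and 0x104 is 260, i.e. the 130 Mbps MCS 15 rate). A tiny sketch of that assumed unit, with a made-up helper name:
	/* Hypothetical helper: convert one mcs_rate[][] entry (assumed to be
	 * in 500 kbps steps) into kbps. */
	static inline u32 example_mcs_entry_to_kbps(u16 entry)
	{
		return entry * 500;
	}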

View File

@ -165,6 +165,7 @@ enum MWIFIEX_802_11_WEP_STATUS {
#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
#define SETHT_MCS32(x) (x[4] |= 1)
#define HT_STREAM_2X2 0x22
#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
@ -673,7 +674,7 @@ struct host_cmd_ds_802_11_ad_hoc_start {
union ieee_types_phy_param_set phy_param_set;
u16 reserved1;
__le16 cap_info_bitmap;
u8 DataRate[HOSTCMD_SUPPORTED_RATES];
u8 data_rate[HOSTCMD_SUPPORTED_RATES];
} __packed;
struct host_cmd_ds_802_11_ad_hoc_result {

View File

@ -187,8 +187,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL;
skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm));
sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
(adapter->sleep_cfm->data);
adapter->cmd_sent = false;
@ -254,6 +252,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
mwifiex_wmm_init(adapter);
if (adapter->sleep_cfm) {
sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
adapter->sleep_cfm->data;
memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
sleep_cfm_buf->command =
cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);

View File

@ -724,8 +724,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
u32 cmd_append_size = 0;
u32 i;
u16 tmp_cap;
uint16_t ht_cap_info;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
u8 radio_type;
struct mwifiex_ie_types_htcap *ht_cap;
struct mwifiex_ie_types_htinfo *ht_info;
@ -837,8 +837,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
}
memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate));
mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
if ((adapter->adhoc_start_band & BAND_G) &&
(priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
@ -850,20 +850,19 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
}
/* Find the last non zero */
for (i = 0; i < sizeof(adhoc_start->DataRate) &&
adhoc_start->DataRate[i];
i++)
;
for (i = 0; i < sizeof(adhoc_start->data_rate); i++)
if (!adhoc_start->data_rate[i])
break;
priv->curr_bss_params.num_of_rates = i;
/* Copy the ad-hoc creating rates into Current BSS rate structure */
memcpy(&priv->curr_bss_params.data_rates,
&adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);
&adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
adhoc_start->DataRate[0], adhoc_start->DataRate[1],
adhoc_start->DataRate[2], adhoc_start->DataRate[3]);
adhoc_start->data_rate[0], adhoc_start->data_rate[1],
adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
@ -914,55 +913,40 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
if (adapter->adhoc_11n_enabled) {
{
ht_cap = (struct mwifiex_ie_types_htcap *) pos;
memset(ht_cap, 0,
sizeof(struct mwifiex_ie_types_htcap));
ht_cap->header.type =
cpu_to_le16(WLAN_EID_HT_CAPABILITY);
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
/* Fill HT CAPABILITY */
ht_cap = (struct mwifiex_ie_types_htcap *) pos;
memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type = mwifiex_band_to_radio_type(
priv->adapter->config_bands);
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
ht_cap_info |= IEEE80211_HT_CAP_SGI_20;
if (adapter->chan_offset) {
ht_cap_info |= IEEE80211_HT_CAP_SGI_40;
ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40;
ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
}
pos += sizeof(struct mwifiex_ie_types_htcap);
cmd_append_size +=
sizeof(struct mwifiex_ie_types_htcap);
ht_cap->ht_cap.ampdu_params_info
= IEEE80211_HT_MAX_AMPDU_64K;
ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
pos += sizeof(struct mwifiex_ie_types_htcap);
cmd_append_size +=
sizeof(struct mwifiex_ie_types_htcap);
}
{
ht_info = (struct mwifiex_ie_types_htinfo *) pos;
memset(ht_info, 0,
sizeof(struct mwifiex_ie_types_htinfo));
ht_info->header.type =
cpu_to_le16(WLAN_EID_HT_INFORMATION);
ht_info->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_info));
ht_info->ht_info.control_chan =
(u8) priv->curr_bss_params.bss_descriptor.
channel;
if (adapter->chan_offset) {
ht_info->ht_info.ht_param =
adapter->chan_offset;
ht_info->ht_info.ht_param |=
/* Fill HT INFORMATION */
ht_info = (struct mwifiex_ie_types_htinfo *) pos;
memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION);
ht_info->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_info));
ht_info->ht_info.control_chan =
(u8) priv->curr_bss_params.bss_descriptor.channel;
if (adapter->chan_offset) {
ht_info->ht_info.ht_param = adapter->chan_offset;
ht_info->ht_info.ht_param |=
IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
}
ht_info->ht_info.operation_mode =
cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
ht_info->ht_info.basic_set[0] = 0xff;
pos += sizeof(struct mwifiex_ie_types_htinfo);
cmd_append_size +=
sizeof(struct mwifiex_ie_types_htinfo);
}
ht_info->ht_info.operation_mode =
cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
ht_info->ht_info.basic_set[0] = 0xff;
pos += sizeof(struct mwifiex_ie_types_htinfo);
cmd_append_size +=
sizeof(struct mwifiex_ie_types_htinfo);
}
cmd->size = cpu_to_le16((u16)

View File

@ -775,7 +775,8 @@ struct mwifiex_chan_freq_power *
struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
struct mwifiex_private *priv,
u8 band, u32 freq);
u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info);
u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
u8 ht_info);
u32 mwifiex_find_freq_from_band_chan(u8, u8);
int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u8 **buffer);

View File

@ -1535,11 +1535,6 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
return 0;
}
static void mwifiex_free_bss_priv(struct cfg80211_bss *bss)
{
kfree(bss->priv);
}
/*
* This function handles the command response of scan.
*
@ -1765,7 +1760,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
cap_info_bitmap, beacon_period,
ie_buf, ie_len, rssi, GFP_KERNEL);
*(u8 *)bss->priv = band;
bss->free_priv = mwifiex_free_bss_priv;
cfg80211_put_bss(bss);
if (priv->media_connected && !memcmp(bssid,
priv->curr_bss_params.bss_descriptor

View File

@ -256,10 +256,13 @@ static int mwifiex_sdio_resume(struct device *dev)
/* Device ID for SD8787 */
#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
/* Device ID for SD8797 */
#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
/* WLAN IDs */
static const struct sdio_device_id mwifiex_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
{},
};
@ -1573,7 +1576,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
switch (func->device) {
case SDIO_DEVICE_ID_MARVELL_8797:
strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
break;
case SDIO_DEVICE_ID_MARVELL_8787:
default:
strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
break;
}
return 0;
@ -1774,4 +1786,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
MODULE_VERSION(SDIO_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);

View File

@ -29,6 +29,7 @@
#include "main.h"
#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0

View File

@ -508,7 +508,7 @@ static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv,
priv->tx_htinfo = resp->params.tx_rate.ht_info;
if (!priv->is_data_rate_auto)
priv->data_rate =
mwifiex_index_to_data_rate(priv->tx_rate,
mwifiex_index_to_data_rate(priv, priv->tx_rate,
priv->tx_htinfo);
return 0;

View File

@ -832,8 +832,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
if (!ret) {
if (rate->is_rate_auto)
rate->rate = mwifiex_index_to_data_rate(priv->tx_rate,
priv->tx_htinfo);
rate->rate = mwifiex_index_to_data_rate(priv,
priv->tx_rate, priv->tx_htinfo);
else
rate->rate = priv->data_rate;
} else {

View File

@ -126,6 +126,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
u16 rx_pkt_type;
struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
if (!priv)
return -1;
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_type = local_rx_pd->rx_pkt_type;
@ -189,12 +192,11 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
(u8) local_rx_pd->rx_pkt_type,
skb);
if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
if (priv && (ret == -1))
priv->stats.rx_dropped++;
if (ret || (rx_pkt_type == PKT_TYPE_BAR))
dev_kfree_skb_any(skb);
}
if (ret)
priv->stats.rx_dropped++;
return ret;
}

View File

@ -581,11 +581,7 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
struct p54s_priv *priv = dev->priv;
unsigned long flags;
if (mutex_lock_interruptible(&priv->mutex)) {
/* FIXME: how to handle this error? */
return;
}
mutex_lock(&priv->mutex);
WARN_ON(priv->fw_state != FW_STATE_READY);
cancel_work_sync(&priv->work);

View File

@ -793,8 +793,8 @@ islpci_set_multicast_list(struct net_device *dev)
static void islpci_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops islpci_ethtool_ops = {

View File

@ -1160,6 +1160,15 @@ enum nl80211_commands {
*
* @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from
* &enum nl80211_feature_flags and is advertised in wiphy information.
* @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe
* requests while operating in AP-mode.
* This attribute holds a bitmap of the supported protocols for
* offloading (see &enum nl80211_probe_resp_offload_support_attr).
*
* @NL80211_ATTR_PROBE_RESP: Probe Response template data. Contains the entire
* probe-response frame. The DA field in the 802.11 header is zero-ed out,
* to be filled by the FW.
*
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@ -1395,6 +1404,10 @@ enum nl80211_attrs {
NL80211_ATTR_FEATURE_FLAGS,
NL80211_ATTR_PROBE_RESP_OFFLOAD,
NL80211_ATTR_PROBE_RESP,
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@ -2727,4 +2740,25 @@ enum nl80211_feature_flags {
NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
};
/**
* enum nl80211_probe_resp_offload_support_attr - optional supported
* protocols for probe-response offloading by the driver/FW.
* To be used with the %NL80211_ATTR_PROBE_RESP_OFFLOAD attribute.
* Each enum value represents a bit in the bitmap of supported
* protocols. Typically a subset of probe-requests belonging to a
* supported protocol will be excluded from offload and uploaded
* to the host.
*
* @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS: Support for WPS ver. 1
* @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2: Support for WPS ver. 2
* @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P: Support for P2P
* @NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U: Support for 802.11u
*/
enum nl80211_probe_resp_offload_support_attr {
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS = 1<<0,
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 = 1<<1,
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P = 1<<2,
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U = 1<<3,
};
#endif /* __LINUX_NL80211_H */
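A hedged sketch of how a driver would be expected to consume these bits, together with the WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD flag and the wiphy->probe_resp_offload field added in the cfg80211 hunks further below; the function name is made up for illustration:
	static void example_advertise_probe_resp_offload(struct wiphy *wiphy)
	{
		/* The device answers probe requests itself while in AP mode ... */
		wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
		/* ... for these protocols (a subset of such probe requests may
		 * still be passed up to the host). */
		wiphy->probe_resp_offload =
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}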

View File

@ -391,6 +391,8 @@ struct cfg80211_crypto_settings {
* @assocresp_ies: extra information element(s) to add into (Re)Association
* Response frames or %NULL
* @assocresp_ies_len: length of assocresp_ies in octets
* @probe_resp_len: length of probe response template (@probe_resp)
* @probe_resp: probe response template (AP mode only)
*/
struct beacon_parameters {
u8 *head, *tail;
@ -408,6 +410,8 @@ struct beacon_parameters {
size_t proberesp_ies_len;
const u8 *assocresp_ies;
size_t assocresp_ies_len;
int probe_resp_len;
u8 *probe_resp;
};
/**
@ -1342,6 +1346,9 @@ struct cfg80211_gtk_rekey_data {
* doesn't verify much. Note, however, that the passed netdev may be
* %NULL as well if the user requested changing the channel for the
* device itself, or for a monitor interface.
* @get_channel: Get the current operating channel, should return %NULL if
* there's no single defined operating channel if for example the
* device implements channel hopping for multi-channel virtual interfaces.
*
* @scan: Request to do a scan. If returning zero, the scan request is given
* the driver, and will be valid until passed to cfg80211_scan_done().
@ -1627,6 +1634,8 @@ struct cfg80211_ops {
int (*probe_client)(struct wiphy *wiphy, struct net_device *dev,
const u8 *peer, u64 *cookie);
struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy);
};
/*
@ -1689,6 +1698,8 @@ struct cfg80211_ops {
* @WIPHY_FLAG_REPORTS_OBSS: the device will report beacons from other BSSes
* when there are virtual interfaces in AP mode by calling
* cfg80211_report_obss_beacon().
* @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD: When operating as an AP, the device
* responds to probe-requests in hardware.
*/
enum wiphy_flags {
WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
@ -1709,6 +1720,7 @@ enum wiphy_flags {
WIPHY_FLAG_TDLS_EXTERNAL_SETUP = BIT(16),
WIPHY_FLAG_HAVE_AP_SME = BIT(17),
WIPHY_FLAG_REPORTS_OBSS = BIT(18),
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD = BIT(19),
};
/**
@ -1977,6 +1989,13 @@ struct wiphy {
u32 available_antennas_tx;
u32 available_antennas_rx;
/*
* Bitmap of supported protocols for probe response offloading
* see &enum nl80211_probe_resp_offload_support_attr. Only valid
* when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
*/
u32 probe_resp_offload;
/* If multiple wiphys are registered and you're handed e.g.
* a regular netdev with assigned ieee80211_ptr, you won't
* know whether it points to a wiphy your driver has registered


@ -271,14 +271,6 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_MCS_FEC_LDPC 0x10
/* Ugly macro to convert literal channel numbers into their MHz equivalents
* There are certainly some conditions that will break this (like feeding it '30')
* but they shouldn't arise since nothing talks on channel 30. */
#define ieee80211chan2mhz(x) \
(((x) <= 14) ? \
(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
((x) + 1000) * 5)
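A few worked values of the macro above, for reference: channel 6 maps to 6 * 5 + 2407 = 2437 MHz, channel 14 is special-cased to 2484 MHz, and channel 36 maps to (36 + 1000) * 5 = 5180 MHz.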
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
{


@ -166,6 +166,7 @@ struct ieee80211_low_level_stats {
* that it is only ever disabled for station mode.
* @BSS_CHANGED_IDLE: Idle changed for this BSS/interface.
* @BSS_CHANGED_SSID: SSID changed for this BSS (AP mode)
* @BSS_CHANGED_AP_PROBE_RESP: Probe Response changed for this BSS (AP mode)
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@ -184,6 +185,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_QOS = 1<<13,
BSS_CHANGED_IDLE = 1<<14,
BSS_CHANGED_SSID = 1<<15,
BSS_CHANGED_AP_PROBE_RESP = 1<<16,
/* when adding here, make sure to change ieee80211_reconfig */
};
@ -2674,6 +2676,19 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
return ieee80211_beacon_get_tim(hw, vif, NULL, NULL);
}
/**
* ieee80211_proberesp_get - retrieve a Probe Response template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* Creates a Probe Response template which can, for example, be uploaded to
* hardware. The destination address should be set by the caller.
*
* Can only be called in AP mode.
*/
struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
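A hedged sketch of a mac80211 driver picking up the template from its bss_info_changed callback when BSS_CHANGED_AP_PROBE_RESP is signalled; mydrv_upload_probe_resp() stands in for the driver's own firmware upload routine and is not part of this patch:

	static void mydrv_bss_info_changed(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif,
					   struct ieee80211_bss_conf *info,
					   u32 changed)
	{
		if (changed & BSS_CHANGED_AP_PROBE_RESP) {
			struct sk_buff *skb = ieee80211_proberesp_get(hw, vif);

			if (skb) {
				/* DA is zeroed in the template; the firmware fills it in */
				mydrv_upload_probe_resp(hw->priv, skb->data, skb->len);
				dev_kfree_skb(skb);
			}
		}
	}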
/**
* ieee80211_pspoll_get - retrieve a PS Poll template
* @hw: pointer obtained from ieee80211_alloc_hw().


@ -34,31 +34,30 @@
#define NCI_MAX_NUM_CONN 10
/* NCI Status Codes */
#define NCI_STATUS_OK 0x00
#define NCI_STATUS_REJECTED 0x01
#define NCI_STATUS_MESSAGE_CORRUPTED 0x02
#define NCI_STATUS_BUFFER_FULL 0x03
#define NCI_STATUS_FAILED 0x04
#define NCI_STATUS_NOT_INITIALIZED 0x05
#define NCI_STATUS_SYNTAX_ERROR 0x06
#define NCI_STATUS_SEMANTIC_ERROR 0x07
#define NCI_STATUS_UNKNOWN_GID 0x08
#define NCI_STATUS_UNKNOWN_OID 0x09
#define NCI_STATUS_INVALID_PARAM 0x0a
#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0b
#define NCI_STATUS_OK 0x00
#define NCI_STATUS_REJECTED 0x01
#define NCI_STATUS_RF_FRAME_CORRUPTED 0x02
#define NCI_STATUS_FAILED 0x03
#define NCI_STATUS_NOT_INITIALIZED 0x04
#define NCI_STATUS_SYNTAX_ERROR 0x05
#define NCI_STATUS_SEMANTIC_ERROR 0x06
#define NCI_STATUS_UNKNOWN_GID 0x07
#define NCI_STATUS_UNKNOWN_OID 0x08
#define NCI_STATUS_INVALID_PARAM 0x09
#define NCI_STATUS_MESSAGE_SIZE_EXCEEDED 0x0a
/* Discovery Specific Status Codes */
#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
#define NCI_STATUS_DISCOVERY_ALREADY_STARTED 0xa0
#define NCI_STATUS_DISCOVERY_TARGET_ACTIVATION_FAILED 0xa1
#define NCI_STATUS_DISCOVERY_TEAR_DOWN 0xa2
/* RF Interface Specific Status Codes */
#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
#define NCI_STATUS_RF_LINK_LOSS_ERROR 0xb3
#define NCI_STATUS_RF_TRANSMISSION_ERROR 0xb0
#define NCI_STATUS_RF_PROTOCOL_ERROR 0xb1
#define NCI_STATUS_RF_TIMEOUT_ERROR 0xb2
/* NFCEE Interface Specific Status Codes */
#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0
#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1
#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2
#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3
#define NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED 0xc0
#define NCI_STATUS_NFCEE_INTERFACE_ACTIVATION_FAILED 0xc1
#define NCI_STATUS_NFCEE_TRANSMISSION_ERROR 0xc2
#define NCI_STATUS_NFCEE_PROTOCOL_ERROR 0xc3
#define NCI_STATUS_NFCEE_TIMEOUT_ERROR 0xc4
/* NCI RF Technology and Mode */
@ -73,6 +72,21 @@
#define NCI_NFC_A_ACTIVE_LISTEN_MODE 0x83
#define NCI_NFC_F_ACTIVE_LISTEN_MODE 0x85
/* NCI RF Technologies */
#define NCI_NFC_RF_TECHNOLOGY_A 0x00
#define NCI_NFC_RF_TECHNOLOGY_B 0x01
#define NCI_NFC_RF_TECHNOLOGY_F 0x02
#define NCI_NFC_RF_TECHNOLOGY_15693 0x03
/* NCI Bit Rates */
#define NCI_NFC_BIT_RATE_106 0x00
#define NCI_NFC_BIT_RATE_212 0x01
#define NCI_NFC_BIT_RATE_424 0x02
#define NCI_NFC_BIT_RATE_848 0x03
#define NCI_NFC_BIT_RATE_1696 0x04
#define NCI_NFC_BIT_RATE_3392 0x05
#define NCI_NFC_BIT_RATE_6784 0x06
/* NCI RF Protocols */
#define NCI_RF_PROTOCOL_UNKNOWN 0x00
#define NCI_RF_PROTOCOL_T1T 0x01
@ -82,10 +96,20 @@
#define NCI_RF_PROTOCOL_NFC_DEP 0x05
/* NCI RF Interfaces */
#define NCI_RF_INTERFACE_RFU 0x00
#define NCI_RF_INTERFACE_FRAME 0x01
#define NCI_RF_INTERFACE_ISO_DEP 0x02
#define NCI_RF_INTERFACE_NFC_DEP 0x03
#define NCI_RF_INTERFACE_NFCEE_DIRECT 0x00
#define NCI_RF_INTERFACE_FRAME 0x01
#define NCI_RF_INTERFACE_ISO_DEP 0x02
#define NCI_RF_INTERFACE_NFC_DEP 0x03
/* NCI Reset types */
#define NCI_RESET_TYPE_KEEP_CONFIG 0x00
#define NCI_RESET_TYPE_RESET_CONFIG 0x01
/* NCI Static RF connection ID */
#define NCI_STATIC_RF_CONN_ID 0x00
/* NCI Data Flow Control */
#define NCI_DATA_FLOW_CONTROL_NOT_USED 0xff
/* NCI RF_DISCOVER_MAP_CMD modes */
#define NCI_DISC_MAP_MODE_POLL 0x01
@ -94,25 +118,22 @@
/* NCI Discovery Types */
#define NCI_DISCOVERY_TYPE_POLL_A_PASSIVE 0x00
#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01
#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02
#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03
#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05
#define NCI_DISCOVERY_TYPE_WAKEUP_A_PASSIVE 0x06
#define NCI_DISCOVERY_TYPE_WAKEUP_B_PASSIVE 0x07
#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09
#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80
#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81
#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82
#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83
#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85
#define NCI_DISCOVERY_TYPE_POLL_B_PASSIVE 0x01
#define NCI_DISCOVERY_TYPE_POLL_F_PASSIVE 0x02
#define NCI_DISCOVERY_TYPE_POLL_A_ACTIVE 0x03
#define NCI_DISCOVERY_TYPE_POLL_F_ACTIVE 0x05
#define NCI_DISCOVERY_TYPE_WAKEUP_A_ACTIVE 0x09
#define NCI_DISCOVERY_TYPE_LISTEN_A_PASSIVE 0x80
#define NCI_DISCOVERY_TYPE_LISTEN_B_PASSIVE 0x81
#define NCI_DISCOVERY_TYPE_LISTEN_F_PASSIVE 0x82
#define NCI_DISCOVERY_TYPE_LISTEN_A_ACTIVE 0x83
#define NCI_DISCOVERY_TYPE_LISTEN_F_ACTIVE 0x85
/* NCI Deactivation Type */
#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
#define NCI_DEACTIVATE_TYPE_RF_LINK_LOSS 0x03
#define NCI_DEACTIVATE_TYPE_DISCOVERY_ERROR 0x04
#define NCI_DEACTIVATE_TYPE_IDLE_MODE 0x00
#define NCI_DEACTIVATE_TYPE_SLEEP_MODE 0x01
#define NCI_DEACTIVATE_TYPE_SLEEP_AF_MODE 0x02
#define NCI_DEACTIVATE_TYPE_DISCOVERY 0x03
/* Message Type (MT) */
#define NCI_MT_DATA_PKT 0x00
@ -144,10 +165,10 @@
#define nci_conn_id(hdr) (__u8)(((hdr)[0])&0x0f)
/* GID values */
#define NCI_GID_CORE 0x0
#define NCI_GID_RF_MGMT 0x1
#define NCI_GID_NFCEE_MGMT 0x2
#define NCI_GID_PROPRIETARY 0xf
#define NCI_GID_CORE 0x0
#define NCI_GID_RF_MGMT 0x1
#define NCI_GID_NFCEE_MGMT 0x2
#define NCI_GID_PROPRIETARY 0xf
/* ---- NCI Packet structures ---- */
#define NCI_CTRL_HDR_SIZE 3
@ -169,18 +190,11 @@ struct nci_data_hdr {
/* ----- NCI Commands ---- */
/* ------------------------ */
#define NCI_OP_CORE_RESET_CMD nci_opcode_pack(NCI_GID_CORE, 0x00)
#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
#define NCI_OP_CORE_SET_CONFIG_CMD nci_opcode_pack(NCI_GID_CORE, 0x02)
#define NCI_OP_CORE_CONN_CREATE_CMD nci_opcode_pack(NCI_GID_CORE, 0x04)
struct nci_core_conn_create_cmd {
__u8 target_handle;
__u8 num_target_specific_params;
struct nci_core_reset_cmd {
__u8 reset_type;
} __packed;
#define NCI_OP_CORE_CONN_CLOSE_CMD nci_opcode_pack(NCI_GID_CORE, 0x06)
#define NCI_OP_CORE_INIT_CMD nci_opcode_pack(NCI_GID_CORE, 0x01)
#define NCI_OP_RF_DISCOVER_MAP_CMD nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
struct disc_map_config {
@ -218,6 +232,7 @@ struct nci_rf_deactivate_cmd {
struct nci_core_reset_rsp {
__u8 status;
__u8 nci_ver;
__u8 config_status;
} __packed;
#define NCI_OP_CORE_INIT_RSP nci_opcode_pack(NCI_GID_CORE, 0x01)
@ -232,24 +247,14 @@ struct nci_core_init_rsp_1 {
struct nci_core_init_rsp_2 {
__u8 max_logical_connections;
__le16 max_routing_table_size;
__u8 max_control_packet_payload_length;
__le16 rf_sending_buffer_size;
__le16 rf_receiving_buffer_size;
__le16 manufacturer_id;
} __packed;
#define NCI_OP_CORE_SET_CONFIG_RSP nci_opcode_pack(NCI_GID_CORE, 0x02)
#define NCI_OP_CORE_CONN_CREATE_RSP nci_opcode_pack(NCI_GID_CORE, 0x04)
struct nci_core_conn_create_rsp {
__u8 status;
__u8 max_pkt_payload_size;
__u8 max_ctrl_pkt_payload_len;
__le16 max_size_for_large_params;
__u8 max_data_pkt_payload_size;
__u8 initial_num_credits;
__u8 conn_id;
__u8 manufact_id;
__le32 manufact_specific_info;
} __packed;
#define NCI_OP_CORE_CONN_CLOSE_RSP nci_opcode_pack(NCI_GID_CORE, 0x06)
#define NCI_OP_RF_DISCOVER_MAP_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x00)
#define NCI_OP_RF_DISCOVER_RSP nci_opcode_pack(NCI_GID_RF_MGMT, 0x03)
@ -270,12 +275,7 @@ struct nci_core_conn_credit_ntf {
struct conn_credit_entry conn_entries[NCI_MAX_NUM_CONN];
} __packed;
#define NCI_OP_RF_FIELD_INFO_NTF nci_opcode_pack(NCI_GID_CORE, 0x08)
struct nci_rf_field_info_ntf {
__u8 rf_field_status;
} __packed;
#define NCI_OP_RF_ACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
#define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05)
struct rf_tech_specific_params_nfca_poll {
__u16 sens_res;
__u8 nfcid1_len; /* 0, 4, 7, or 10 Bytes */
@ -289,17 +289,20 @@ struct activation_params_nfca_poll_iso_dep {
__u8 rats_res[20];
};
struct nci_rf_activate_ntf {
__u8 target_handle;
struct nci_rf_intf_activated_ntf {
__u8 rf_discovery_id;
__u8 rf_interface_type;
__u8 rf_protocol;
__u8 rf_tech_and_mode;
__u8 activation_rf_tech_and_mode;
__u8 rf_tech_specific_params_len;
union {
struct rf_tech_specific_params_nfca_poll nfca_poll;
} rf_tech_specific_params;
__u8 rf_interface_type;
__u8 data_exch_rf_tech_and_mode;
__u8 data_exch_tx_bit_rate;
__u8 data_exch_rx_bit_rate;
__u8 activation_params_len;
union {
@ -309,5 +312,9 @@ struct nci_rf_activate_ntf {
} __packed;
#define NCI_OP_RF_DEACTIVATE_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x06)
struct nci_rf_deactivate_ntf {
__u8 type;
__u8 reason;
} __packed;
#endif /* __NCI_H */


@ -109,15 +109,12 @@ struct nci_dev {
[NCI_MAX_SUPPORTED_RF_INTERFACES];
__u8 max_logical_connections;
__u16 max_routing_table_size;
__u8 max_control_packet_payload_length;
__u16 rf_sending_buffer_size;
__u16 rf_receiving_buffer_size;
__u16 manufacturer_id;
/* received during NCI_OP_CORE_CONN_CREATE_RSP for static conn 0 */
__u8 max_pkt_payload_size;
__u8 max_ctrl_pkt_payload_len;
__u16 max_size_for_large_params;
__u8 max_data_pkt_payload_size;
__u8 initial_num_credits;
__u8 conn_id;
__u8 manufact_id;
__u32 manufact_specific_info;
/* stored during nci_data_exchange */
data_exchange_cb_t data_exchange_cb;


@ -491,6 +491,31 @@ static void ieee80211_config_ap_ssid(struct ieee80211_sub_if_data *sdata,
(params->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE);
}
static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
u8 *resp, size_t resp_len)
{
struct sk_buff *new, *old;
if (!resp || !resp_len)
return -EINVAL;
old = sdata->u.ap.probe_resp;
new = dev_alloc_skb(resp_len);
if (!new)
return -ENOMEM;
memcpy(skb_put(new, resp_len), resp, resp_len);
/* publish the new template; concurrent readers see either the old or the new one */
rcu_assign_pointer(sdata->u.ap.probe_resp, new);
/* wait for readers still using the old template before freeing it */
synchronize_rcu();
if (old)
dev_kfree_skb(old);
return 0;
}
/*
* This handles both adding a beacon and setting new beacon info
*/
@ -501,6 +526,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
int new_head_len, new_tail_len;
int size;
int err = -EINVAL;
u32 changed = 0;
old = rtnl_dereference(sdata->u.ap.beacon);
@ -584,11 +610,17 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
kfree(old);
ieee80211_config_ap_ssid(sdata, params);
err = ieee80211_set_probe_resp(sdata, params->probe_resp,
params->probe_resp_len);
if (!err)
changed |= BSS_CHANGED_AP_PROBE_RESP;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_BEACON |
BSS_CHANGED_SSID);
ieee80211_config_ap_ssid(sdata, params);
changed |= BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_BEACON |
BSS_CHANGED_SSID;
ieee80211_bss_info_change_notify(sdata, changed);
return 0;
}
@ -869,7 +901,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
sta_apply_parameters(local, sta, params);
rate_control_rate_init(sta);
/*
* for TDLS, rate control should be initialized only when supported
* rates are known.
*/
if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
rate_control_rate_init(sta);
layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
sdata->vif.type == NL80211_IFTYPE_AP;
@ -953,6 +990,9 @@ static int ieee80211_change_station(struct wiphy *wiphy,
sta_apply_parameters(local, sta, params);
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates)
rate_control_rate_init(sta);
rcu_read_unlock();
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@ -2530,12 +2570,13 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
rcu_read_lock();
sta = sta_info_get(sdata, peer);
if (sta)
if (sta) {
qos = test_sta_flag(sta, WLAN_STA_WME);
rcu_read_unlock();
if (!sta)
rcu_read_unlock();
} else {
rcu_read_unlock();
return -ENOLINK;
}
if (qos) {
fc = cpu_to_le16(IEEE80211_FTYPE_DATA |
@ -2582,6 +2623,14 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
return 0;
}
static struct ieee80211_channel *
ieee80211_wiphy_get_channel(struct wiphy *wiphy)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
return local->oper_channel;
}
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@ -2648,4 +2697,5 @@ struct cfg80211_ops mac80211_config_ops = {
.tdls_oper = ieee80211_tdls_oper,
.tdls_mgmt = ieee80211_tdls_mgmt,
.probe_client = ieee80211_probe_client,
.get_channel = ieee80211_wiphy_get_channel,
};


@ -97,6 +97,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
/* if merging, indicate to driver that we leave the old IBSS */
if (sdata->vif.bss_conf.ibss_joined) {
sdata->vif.bss_conf.ibss_joined = false;
netif_carrier_off(sdata->dev);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IBSS);
}
@ -207,6 +208,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
bss = cfg80211_inform_bss_frame(local->hw.wiphy, local->hw.conf.channel,
mgmt, skb->len, 0, GFP_KERNEL);
cfg80211_put_bss(bss);
netif_carrier_on(sdata->dev);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
}
@ -990,6 +992,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
}
sta_info_flush(sdata->local, sdata);
netif_carrier_off(sdata->dev);
/* remove beacon */
kfree(sdata->u.ibss.ie);


@ -232,6 +232,7 @@ struct beacon_data {
struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
struct sk_buff __rcu *probe_resp;
struct list_head vlans;
@ -728,17 +729,16 @@ enum {
* operating channel
* @SCAN_SET_CHANNEL: Set the next channel to be scanned
* @SCAN_SEND_PROBE: Send probe requests and wait for probe responses
* @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP
* about us leaving the channel and stop all associated STA interfaces
* @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the
* AP about us being back and restart all associated STA interfaces
* @SCAN_SUSPEND: Suspend the scan and go back to operating channel to
* send out data
* @SCAN_RESUME: Resume the scan and scan the next channel
*/
enum mac80211_scan_state {
SCAN_DECISION,
SCAN_SET_CHANNEL,
SCAN_SEND_PROBE,
SCAN_LEAVE_OPER_CHANNEL,
SCAN_ENTER_OPER_CHANNEL,
SCAN_SUSPEND,
SCAN_RESUME,
};
struct ieee80211_local {


@ -293,7 +293,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
changed |= ieee80211_reset_erp_info(sdata);
ieee80211_bss_info_change_notify(sdata, changed);
if (sdata->vif.type == NL80211_IFTYPE_STATION)
if (sdata->vif.type == NL80211_IFTYPE_STATION ||
sdata->vif.type == NL80211_IFTYPE_ADHOC)
netif_carrier_off(dev);
else
netif_carrier_on(dev);
@ -461,15 +462,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sub_if_data *vlan, *tmpsdata;
struct beacon_data *old_beacon =
rtnl_dereference(sdata->u.ap.beacon);
struct sk_buff *old_probe_resp =
rtnl_dereference(sdata->u.ap.probe_resp);
/* sdata_running will return false, so this will disable */
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BEACON_ENABLED);
/* remove beacon */
/* remove beacon and probe response */
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
synchronize_rcu();
kfree(old_beacon);
kfree_skb(old_probe_resp);
/* down all dependent devices, that is VLANs */
list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,


@ -871,6 +871,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
if (mpath->flags & MESH_PATH_REQ_QUEUED) {
spin_unlock_bh(&mpath->state_lock);
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
return;
}


@ -1357,9 +1357,6 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
ieee80211_set_disassoc(sdata, true, true);
mutex_unlock(&ifmgd->mtx);
mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
/*
* must be outside lock due to cfg80211,
* but that's not a problem.
@ -1368,6 +1365,10 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
NULL, true);
mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
}
void ieee80211_beacon_connection_loss_work(struct work_struct *work)
@ -2134,9 +2135,6 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
ieee80211_set_disassoc(sdata, true, true);
mutex_unlock(&ifmgd->mtx);
mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
/*
* must be outside lock due to cfg80211,
* but that's not a problem.
@ -2144,6 +2142,11 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
ieee80211_send_deauth_disassoc(sdata, bssid,
IEEE80211_STYPE_DEAUTH, reason,
NULL, true);
mutex_lock(&local->mtx);
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
mutex_lock(&ifmgd->mtx);
}


@ -211,8 +211,6 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
return;
}
ieee80211_recalc_idle(local);
if (local->hw_roc_skb) {
sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
ieee80211_tx_skb(sdata, local->hw_roc_skb);
@ -226,6 +224,8 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
GFP_KERNEL);
}
ieee80211_recalc_idle(local);
mutex_unlock(&local->mtx);
}


@ -334,8 +334,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
static void
calc_rate_durations(struct minstrel_sta_info *mi, struct ieee80211_local *local,
struct minstrel_rate *d, struct ieee80211_rate *rate)
calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d,
struct ieee80211_rate *rate)
{
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
@ -402,8 +402,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
mr->rix = i;
mr->bitrate = sband->bitrates[i].bitrate / 5;
calc_rate_durations(mi, local, mr,
&sband->bitrates[i]);
calc_rate_durations(local, mr, &sband->bitrates[i]);
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */


@ -36,8 +36,17 @@
/* Transmit duration for the raw data part of an average sized packet */
#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
/*
* Define group sort order: HT40 -> SGI -> #streams
*/
#define GROUP_IDX(_streams, _sgi, _ht40) \
MINSTREL_MAX_STREAMS * 2 * _ht40 + \
MINSTREL_MAX_STREAMS * _sgi + \
_streams - 1
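A quick sanity check of the macro, assuming MINSTREL_MAX_STREAMS is 3 (as defined in rc80211_minstrel_ht.h): GROUP_IDX(1, 0, 0) = 0, GROUP_IDX(3, 0, 0) = 2, GROUP_IDX(1, 1, 0) = 3, GROUP_IDX(1, 0, 1) = 6 and GROUP_IDX(3, 1, 1) = 11, giving twelve distinct indexes that match the layout of minstrel_mcs_groups[] below.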
/* MCS rate information for an MCS group */
#define MCS_GROUP(_streams, _sgi, _ht40) { \
#define MCS_GROUP(_streams, _sgi, _ht40) \
[GROUP_IDX(_streams, _sgi, _ht40)] = { \
.streams = _streams, \
.flags = \
(_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
@ -58,6 +67,9 @@
* To enable sufficiently targeted rate sampling, MCS rates are divided into
* groups, based on the number of streams and flags (HT40, SGI) that they
* use.
*
* Sort order has to be fixed for the GROUP_IDX macro to be applicable:
* HT40 -> SGI -> #streams
*/
const struct mcs_group minstrel_mcs_groups[] = {
MCS_GROUP(1, 0, 0),
@ -102,21 +114,9 @@ minstrel_ewma(int old, int new, int weight)
static int
minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
{
int streams = (rate->idx / MCS_GROUP_RATES) + 1;
u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
int i;
for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
if (minstrel_mcs_groups[i].streams != streams)
continue;
if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
continue;
return i;
}
WARN_ON(1);
return 0;
return GROUP_IDX((rate->idx / MCS_GROUP_RATES) + 1,
!!(rate->flags & IEEE80211_TX_RC_SHORT_GI),
!!(rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH));
}
static inline struct minstrel_rate_stats *
@ -130,7 +130,7 @@ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
* Recalculate success probabilities and counters for a rate using EWMA
*/
static void
minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
minstrel_calc_rate_ewma(struct minstrel_rate_stats *mr)
{
if (unlikely(mr->attempts > 0)) {
mr->sample_skipped = 0;
@ -156,8 +156,7 @@ minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr
* the expected number of retransmissions and their expected length
*/
static void
minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
int group, int rate)
minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
{
struct minstrel_rate_stats *mr;
unsigned int usecs;
@ -226,8 +225,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mr = &mg->rates[i];
mr->retry_updated = false;
index = MCS_GROUP_RATES * group + i;
minstrel_calc_rate_ewma(mp, mr);
minstrel_ht_calc_tp(mp, mi, group, i);
minstrel_calc_rate_ewma(mr);
minstrel_ht_calc_tp(mi, group, i);
if (!mr->cur_tp)
continue;
@ -300,10 +299,10 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
static bool
minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
{
if (!rate->count)
if (rate->idx < 0)
return false;
if (rate->idx < 0)
if (!rate->count)
return false;
return !!(rate->flags & IEEE80211_TX_RC_MCS);
@ -357,7 +356,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
}
static void
minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
minstrel_aggr_check(struct ieee80211_sta *pubsta, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
@ -455,7 +454,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
minstrel_ht_update_stats(mp, mi);
if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
minstrel_aggr_check(mp, sta, skb);
minstrel_aggr_check(sta, skb);
}
}
@ -515,7 +514,6 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
static void
minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
struct ieee80211_tx_rate *rate, int index,
struct ieee80211_tx_rate_control *txrc,
bool sample, bool rtscts)
{
const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
@ -628,11 +626,11 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
if (sample_idx >= 0) {
sample = true;
minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
txrc, true, false);
true, false);
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
txrc, false, false);
false, false);
}
if (mp->hw->max_rates >= 3) {
@ -643,13 +641,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
*/
if (sample_idx >= 0)
minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
txrc, false, false);
false, false);
else
minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
txrc, false, true);
false, true);
minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate,
txrc, false, !sample);
false, !sample);
ar[3].count = 0;
ar[3].idx = -1;
@ -660,7 +658,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
* max_tp_rate -> max_prob_rate by default.
*/
minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_prob_rate,
txrc, false, !sample);
false, !sample);
ar[2].count = 0;
ar[2].idx = -1;


@ -212,12 +212,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
if (bss)
ieee80211_rx_bss_put(sdata->local, bss);
/* If we are on-operating-channel, and this packet is for the
* current channel, pass the pkt on up the stack so that
* the rest of the stack can make use of it.
*/
if (ieee80211_cfg_on_oper_channel(sdata->local)
&& (channel == sdata->local->oper_channel))
if (channel == sdata->local->oper_channel)
return RX_CONTINUE;
dev_kfree_skb(skb);
@ -263,8 +258,6 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
bool was_hw_scan)
{
struct ieee80211_local *local = hw_to_local(hw);
bool on_oper_chan;
bool enable_beacons = false;
lockdep_assert_held(&local->mtx);
@ -297,25 +290,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
local->scanning = 0;
local->scan_channel = NULL;
on_oper_chan = ieee80211_cfg_on_oper_channel(local);
if (was_hw_scan || !on_oper_chan)
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
else
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
if (!was_hw_scan) {
bool on_oper_chan2;
ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
on_oper_chan2 = ieee80211_cfg_on_oper_channel(local);
/* We should always be on-channel at this point. */
WARN_ON(!on_oper_chan2);
if (on_oper_chan2 && (on_oper_chan != on_oper_chan2))
enable_beacons = true;
ieee80211_offchannel_return(local, enable_beacons, true);
ieee80211_offchannel_return(local, true, true);
}
ieee80211_recalc_idle(local);
@ -360,11 +341,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
/* We always want to use off-channel PS, even if we
* are not really leaving oper-channel. Don't
* tell the AP though, as long as we are on-channel.
*/
ieee80211_offchannel_enable_all_ps(local, false);
ieee80211_offchannel_stop_vifs(local, true);
ieee80211_configure_filter(local);
@ -372,8 +349,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
ieee80211_hw_config(local, 0);
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
IEEE80211_CHANNEL_TIME);
&local->scan_work, 0);
return 0;
}
@ -509,98 +485,41 @@ static void ieee80211_scan_state_decision(struct ieee80211_local *local,
next_chan = local->scan_req->channels[local->scan_channel_idx];
if (ieee80211_cfg_on_oper_channel(local)) {
/* We're currently on operating channel. */
if (next_chan == local->oper_channel)
/* We don't need to move off of operating channel. */
local->next_scan_state = SCAN_SET_CHANNEL;
else
/*
* We do need to leave operating channel, as next
* scan is somewhere else.
*/
local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
} else {
/*
* we're currently scanning a different channel, let's
* see if we can scan another channel without interfering
* with the current traffic situation.
*
* Since we don't know if the AP has pending frames for us
* we can only check for our tx queues and use the current
* pm_qos requirements for rx. Hence, if no tx traffic occurs
* at all we will scan as many channels in a row as the pm_qos
* latency allows us to. Additionally we also check for the
* currently negotiated listen interval to prevent losing
* frames unnecessarily.
*
* Otherwise switch back to the operating channel.
*/
/*
* we're currently scanning a different channel, let's
* see if we can scan another channel without interfering
* with the current traffic situation.
*
* Since we don't know if the AP has pending frames for us
* we can only check for our tx queues and use the current
* pm_qos requirements for rx. Hence, if no tx traffic occurs
* at all we will scan as many channels in a row as the pm_qos
* latency allows us to. Additionally we also check for the
* currently negotiated listen interval to prevent losing
* frames unnecessarily.
*
* Otherwise switch back to the operating channel.
*/
bad_latency = time_after(jiffies +
ieee80211_scan_get_channel_time(next_chan),
local->leave_oper_channel_time +
usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
bad_latency = time_after(jiffies +
ieee80211_scan_get_channel_time(next_chan),
local->leave_oper_channel_time +
usecs_to_jiffies(pm_qos_request(PM_QOS_NETWORK_LATENCY)));
listen_int_exceeded = time_after(jiffies +
ieee80211_scan_get_channel_time(next_chan),
local->leave_oper_channel_time +
usecs_to_jiffies(min_beacon_int * 1024) *
local->hw.conf.listen_interval);
listen_int_exceeded = time_after(jiffies +
ieee80211_scan_get_channel_time(next_chan),
local->leave_oper_channel_time +
usecs_to_jiffies(min_beacon_int * 1024) *
local->hw.conf.listen_interval);
if (associated && ( !tx_empty || bad_latency ||
listen_int_exceeded))
local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
else
local->next_scan_state = SCAN_SET_CHANNEL;
}
if (associated && (!tx_empty || bad_latency || listen_int_exceeded))
local->next_scan_state = SCAN_SUSPEND;
else
local->next_scan_state = SCAN_SET_CHANNEL;
*next_delay = 0;
}
static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
/* PS will already be in off-channel mode,
* we do that once at the beginning of scanning.
*/
ieee80211_offchannel_stop_vifs(local, false);
/*
* What if the nullfunc frames didn't arrive?
*/
drv_flush(local, false);
if (local->ops->flush)
*next_delay = 0;
else
*next_delay = HZ / 10;
/* remember when we left the operating channel */
local->leave_oper_channel_time = jiffies;
/* advance to the next channel to be scanned */
local->next_scan_state = SCAN_SET_CHANNEL;
}
static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
/* switch back to the operating channel */
local->scan_channel = NULL;
if (!ieee80211_cfg_on_oper_channel(local))
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/*
* Re-enable vifs and beaconing. Leave PS
* in off-channel state..will put that back
* on-channel at the end of scanning.
*/
ieee80211_offchannel_return(local, true, false);
*next_delay = HZ / 5;
local->next_scan_state = SCAN_DECISION;
}
static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
@ -612,10 +531,8 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
local->scan_channel = chan;
/* Only call hw-config if we really need to change channels. */
if (chan != local->hw.conf.channel)
if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
/* advance state machine to next channel/band */
local->scan_channel_idx++;
@ -672,6 +589,44 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
local->next_scan_state = SCAN_DECISION;
}
static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
unsigned long *next_delay)
{
/* switch back to the operating channel */
local->scan_channel = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/*
* Re-enable vifs and beaconing. Leave PS
* in off-channel state..will put that back
* on-channel at the end of scanning.
*/
ieee80211_offchannel_return(local, true, false);
*next_delay = HZ / 5;
/* afterwards, resume scan & go to next channel */
local->next_scan_state = SCAN_RESUME;
}
static void ieee80211_scan_state_resume(struct ieee80211_local *local,
unsigned long *next_delay)
{
/* PS already is in off-channel mode */
ieee80211_offchannel_stop_vifs(local, false);
if (local->ops->flush) {
drv_flush(local, false);
*next_delay = 0;
} else
*next_delay = HZ / 10;
/* remember when we left the operating channel */
local->leave_oper_channel_time = jiffies;
/* advance to the next channel to be scanned */
local->next_scan_state = SCAN_DECISION;
}
void ieee80211_scan_work(struct work_struct *work)
{
struct ieee80211_local *local =
@ -742,11 +697,11 @@ void ieee80211_scan_work(struct work_struct *work)
case SCAN_SEND_PROBE:
ieee80211_scan_state_send_probe(local, &next_delay);
break;
case SCAN_LEAVE_OPER_CHANNEL:
ieee80211_scan_state_leave_oper_channel(local, &next_delay);
case SCAN_SUSPEND:
ieee80211_scan_state_suspend(local, &next_delay);
break;
case SCAN_ENTER_OPER_CHANNEL:
ieee80211_scan_state_enter_oper_channel(local, &next_delay);
case SCAN_RESUME:
ieee80211_scan_state_resume(local, &next_delay);
break;
}
} while (next_delay == 0);


@ -571,8 +571,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
switch (tx->key->conf.cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (ieee80211_is_auth(hdr->frame_control))
break;
case WLAN_CIPHER_SUITE_TKIP:
if (!ieee80211_is_data_present(hdr->frame_control))
tx->key = NULL;
@ -2415,6 +2413,37 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
struct sk_buff *ieee80211_proberesp_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ieee80211_if_ap *ap = NULL;
struct sk_buff *presp = NULL, *skb = NULL;
struct ieee80211_hdr *hdr;
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
if (sdata->vif.type != NL80211_IFTYPE_AP)
return NULL;
rcu_read_lock();
ap = &sdata->u.ap;
presp = rcu_dereference(ap->probe_resp);
if (!presp)
goto out;
skb = skb_copy(presp, GFP_ATOMIC);
if (!skb)
goto out;
hdr = (struct ieee80211_hdr *) skb->data;
memset(hdr->addr1, 0, sizeof(hdr->addr1));
out:
rcu_read_unlock();
return skb;
}
EXPORT_SYMBOL(ieee80211_proberesp_get);
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{


@ -1071,7 +1071,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
changed |= BSS_CHANGED_IBSS;
/* fall through */
case NL80211_IFTYPE_AP:
changed |= BSS_CHANGED_SSID;
changed |= BSS_CHANGED_SSID |
BSS_CHANGED_AP_PROBE_RESP;
/* fall through */
case NL80211_IFTYPE_MESH_POINT:
changed |= BSS_CHANGED_BEACON |
@ -1093,6 +1094,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
ieee80211_recalc_ps(local, -1);
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.


@ -415,7 +415,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
memmove(pos, pos + CCMP_HDR_LEN, hdrlen);
/* the HW only needs room for the IV, but not the actual IV */
if (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)
if (info->control.hw_key &&
(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))
return 0;
hdr = (struct ieee80211_hdr *) pos;


@ -125,7 +125,10 @@ static inline int nci_request(struct nci_dev *ndev,
static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 0, NULL);
struct nci_core_reset_cmd cmd;
cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}
static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
@ -135,17 +138,11 @@ static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
struct nci_core_conn_create_cmd conn_cmd;
struct nci_rf_disc_map_cmd cmd;
struct disc_map_config *cfg = cmd.mapping_configs;
__u8 *num = &cmd.num_mapping_configs;
int i;
/* create static rf connection */
conn_cmd.target_handle = 0;
conn_cmd.num_target_specific_params = 0;
nci_send_cmd(ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, &conn_cmd);
/* set rf mapping configurations */
*num = 0;
@ -469,7 +466,7 @@ static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
ndev->data_exchange_cb = cb;
ndev->data_exchange_cb_context = cb_context;
rc = nci_send_data(ndev, ndev->conn_id, skb);
rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
if (rc)
clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
@ -725,7 +722,10 @@ static void nci_tx_work(struct work_struct *work)
if (!skb)
return;
atomic_dec(&ndev->credits_cnt);
/* Check if data flow control is used */
if (atomic_read(&ndev->credits_cnt) !=
NCI_DATA_FLOW_CONTROL_NOT_USED)
atomic_dec(&ndev->credits_cnt);
nfc_dbg("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d",
nci_pbf(skb->data),


@ -95,7 +95,8 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
__skb_queue_head_init(&frags_q);
while (total_len) {
frag_len = min_t(int, total_len, ndev->max_pkt_payload_size);
frag_len =
min_t(int, total_len, ndev->max_data_pkt_payload_size);
skb_frag = nci_skb_alloc(ndev,
(NCI_DATA_HDR_SIZE + frag_len),
@ -151,7 +152,7 @@ int nci_send_data(struct nci_dev *ndev, __u8 conn_id, struct sk_buff *skb)
nfc_dbg("entry, conn_id 0x%x, plen %d", conn_id, skb->len);
/* check if the packet needs to be fragmented */
if (skb->len <= ndev->max_pkt_payload_size) {
if (skb->len <= ndev->max_data_pkt_payload_size) {
/* no need to fragment packet */
nci_push_data_hdr(ndev, conn_id, skb, NCI_PBF_LAST);
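For illustration (numbers are made up): with a max_data_pkt_payload_size of 255 and a 700-byte payload, nci_send_data() would take the fragmentation path and nci_queue_tx_data_frags() would queue three fragments of 255, 255 and 190 bytes, the first two pushed with NCI_PBF_CONT and the last with NCI_PBF_LAST.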


@ -42,12 +42,9 @@ int nci_to_errno(__u8 code)
case NCI_STATUS_REJECTED:
return -EBUSY;
case NCI_STATUS_MESSAGE_CORRUPTED:
case NCI_STATUS_RF_FRAME_CORRUPTED:
return -EBADMSG;
case NCI_STATUS_BUFFER_FULL:
return -ENOBUFS;
case NCI_STATUS_NOT_INITIALIZED:
return -EHOSTDOWN;
@ -80,9 +77,6 @@ int nci_to_errno(__u8 code)
case NCI_STATUS_NFCEE_TIMEOUT_ERROR:
return -ETIMEDOUT;
case NCI_STATUS_RF_LINK_LOSS_ERROR:
return -ENOLINK;
case NCI_STATUS_MAX_ACTIVE_NFCEE_INTERFACES_REACHED:
return -EDQUOT;


@ -54,7 +54,7 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
ntf->conn_entries[i].conn_id,
ntf->conn_entries[i].credits);
if (ntf->conn_entries[i].conn_id == ndev->conn_id) {
if (ntf->conn_entries[i].conn_id == NCI_STATIC_RF_CONN_ID) {
/* found static rf connection */
atomic_add(ntf->conn_entries[i].credits,
&ndev->credits_cnt);
@ -66,22 +66,12 @@ static void nci_core_conn_credits_ntf_packet(struct nci_dev *ndev,
queue_work(ndev->tx_wq, &ndev->tx_work);
}
static void nci_rf_field_info_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct nci_rf_field_info_ntf *ntf = (void *) skb->data;
nfc_dbg("entry, rf_field_status %d", ntf->rf_field_status);
}
static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
struct nci_rf_activate_ntf *ntf, __u8 *data)
static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
{
struct rf_tech_specific_params_nfca_poll *nfca_poll;
struct activation_params_nfca_poll_iso_dep *nfca_poll_iso_dep;
nfca_poll = &ntf->rf_tech_specific_params.nfca_poll;
nfca_poll_iso_dep = &ntf->activation_params.nfca_poll_iso_dep;
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
@ -100,32 +90,32 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
if (nfca_poll->sel_res_len != 0)
nfca_poll->sel_res = *data++;
ntf->rf_interface_type = *data++;
ntf->activation_params_len = *data++;
nfc_dbg("sel_res_len %d, sel_res 0x%x, rf_interface_type %d, activation_params_len %d",
nfc_dbg("sel_res_len %d, sel_res 0x%x",
nfca_poll->sel_res_len,
nfca_poll->sel_res,
ntf->rf_interface_type,
ntf->activation_params_len);
nfca_poll->sel_res);
switch (ntf->rf_interface_type) {
case NCI_RF_INTERFACE_ISO_DEP:
nfca_poll_iso_dep->rats_res_len = *data++;
if (nfca_poll_iso_dep->rats_res_len > 0) {
memcpy(nfca_poll_iso_dep->rats_res,
return data;
}
static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
struct nci_rf_intf_activated_ntf *ntf, __u8 *data)
{
struct activation_params_nfca_poll_iso_dep *nfca_poll;
switch (ntf->activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
nfca_poll->rats_res_len = *data++;
if (nfca_poll->rats_res_len > 0) {
memcpy(nfca_poll->rats_res,
data,
nfca_poll_iso_dep->rats_res_len);
nfca_poll->rats_res_len);
}
break;
case NCI_RF_INTERFACE_FRAME:
/* no activation params */
break;
default:
nfc_err("unsupported rf_interface_type 0x%x",
ntf->rf_interface_type);
nfc_err("unsupported activation_rf_tech_and_mode 0x%x",
ntf->activation_rf_tech_and_mode);
return -EPROTO;
}
@ -133,7 +123,7 @@ static int nci_rf_activate_nfca_passive_poll(struct nci_dev *ndev,
}
static void nci_target_found(struct nci_dev *ndev,
struct nci_rf_activate_ntf *ntf)
struct nci_rf_intf_activated_ntf *ntf)
{
struct nfc_target nfc_tgt;
@ -141,6 +131,8 @@ static void nci_target_found(struct nci_dev *ndev,
nfc_tgt.supported_protocols = NFC_PROTO_MIFARE_MASK;
else if (ntf->rf_protocol == NCI_RF_PROTOCOL_ISO_DEP) /* 4A */
nfc_tgt.supported_protocols = NFC_PROTO_ISO14443_MASK;
else
nfc_tgt.supported_protocols = 0;
nfc_tgt.sens_res = ntf->rf_tech_specific_params.nfca_poll.sens_res;
nfc_tgt.sel_res = ntf->rf_tech_specific_params.nfca_poll.sel_res;
@ -158,49 +150,86 @@ static void nci_target_found(struct nci_dev *ndev,
nfc_targets_found(ndev->nfc_dev, &nfc_tgt, 1);
}
static void nci_rf_activate_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
static void nci_rf_intf_activated_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct nci_rf_activate_ntf ntf;
struct nci_rf_intf_activated_ntf ntf;
__u8 *data = skb->data;
int rc = -1;
int err = 0;
clear_bit(NCI_DISCOVERY, &ndev->flags);
set_bit(NCI_POLL_ACTIVE, &ndev->flags);
ntf.target_handle = *data++;
ntf.rf_discovery_id = *data++;
ntf.rf_interface_type = *data++;
ntf.rf_protocol = *data++;
ntf.rf_tech_and_mode = *data++;
ntf.activation_rf_tech_and_mode = *data++;
ntf.rf_tech_specific_params_len = *data++;
nfc_dbg("target_handle %d, rf_protocol 0x%x, rf_tech_and_mode 0x%x, rf_tech_specific_params_len %d",
ntf.target_handle,
ntf.rf_protocol,
ntf.rf_tech_and_mode,
nfc_dbg("rf_discovery_id %d", ntf.rf_discovery_id);
nfc_dbg("rf_interface_type 0x%x", ntf.rf_interface_type);
nfc_dbg("rf_protocol 0x%x", ntf.rf_protocol);
nfc_dbg("activation_rf_tech_and_mode 0x%x",
ntf.activation_rf_tech_and_mode);
nfc_dbg("rf_tech_specific_params_len %d",
ntf.rf_tech_specific_params_len);
switch (ntf.rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
rc = nci_rf_activate_nfca_passive_poll(ndev, &ntf,
data);
break;
if (ntf.rf_tech_specific_params_len > 0) {
switch (ntf.activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
data = nci_extract_rf_params_nfca_passive_poll(ndev,
&ntf, data);
break;
default:
nfc_err("unsupported rf_tech_and_mode 0x%x",
ntf.rf_tech_and_mode);
return;
default:
nfc_err("unsupported activation_rf_tech_and_mode 0x%x",
ntf.activation_rf_tech_and_mode);
return;
}
}
if (!rc)
ntf.data_exch_rf_tech_and_mode = *data++;
ntf.data_exch_tx_bit_rate = *data++;
ntf.data_exch_rx_bit_rate = *data++;
ntf.activation_params_len = *data++;
nfc_dbg("data_exch_rf_tech_and_mode 0x%x",
ntf.data_exch_rf_tech_and_mode);
nfc_dbg("data_exch_tx_bit_rate 0x%x",
ntf.data_exch_tx_bit_rate);
nfc_dbg("data_exch_rx_bit_rate 0x%x",
ntf.data_exch_rx_bit_rate);
nfc_dbg("activation_params_len %d",
ntf.activation_params_len);
if (ntf.activation_params_len > 0) {
switch (ntf.rf_interface_type) {
case NCI_RF_INTERFACE_ISO_DEP:
err = nci_extract_activation_params_iso_dep(ndev,
&ntf, data);
break;
case NCI_RF_INTERFACE_FRAME:
/* no activation params */
break;
default:
nfc_err("unsupported rf_interface_type 0x%x",
ntf.rf_interface_type);
return;
}
}
if (!err)
nci_target_found(ndev, &ntf);
}
static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
__u8 type = skb->data[0];
struct nci_rf_deactivate_ntf *ntf = (void *) skb->data;
nfc_dbg("entry, type 0x%x", type);
nfc_dbg("entry, type 0x%x, reason 0x%x", ntf->type, ntf->reason);
clear_bit(NCI_POLL_ACTIVE, &ndev->flags);
ndev->target_active_prot = 0;
@ -214,6 +243,9 @@ static void nci_rf_deactivate_ntf_packet(struct nci_dev *ndev,
ndev->rx_data_reassembly = 0;
}
/* set the available credits to initial value */
atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
/* complete the data exchange transaction, if exists */
if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
nci_data_exchange_complete(ndev, NULL, -EIO);
@ -237,12 +269,8 @@ void nci_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_core_conn_credits_ntf_packet(ndev, skb);
break;
case NCI_OP_RF_FIELD_INFO_NTF:
nci_rf_field_info_ntf_packet(ndev, skb);
break;
case NCI_OP_RF_ACTIVATE_NTF:
nci_rf_activate_ntf_packet(ndev, skb);
case NCI_OP_RF_INTF_ACTIVATED_NTF:
nci_rf_intf_activated_ntf_packet(ndev, skb);
break;
case NCI_OP_RF_DEACTIVATE_NTF:


@ -42,10 +42,11 @@ static void nci_core_reset_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nfc_dbg("entry, status 0x%x", rsp->status);
if (rsp->status == NCI_STATUS_OK)
if (rsp->status == NCI_STATUS_OK) {
ndev->nci_ver = rsp->nci_ver;
nfc_dbg("nci_ver 0x%x", ndev->nci_ver);
nfc_dbg("nci_ver 0x%x, config_status 0x%x",
rsp->nci_ver, rsp->config_status);
}
nci_req_complete(ndev, rsp->status);
}
@ -58,13 +59,13 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nfc_dbg("entry, status 0x%x", rsp_1->status);
if (rsp_1->status != NCI_STATUS_OK)
return;
goto exit;
ndev->nfcc_features = __le32_to_cpu(rsp_1->nfcc_features);
ndev->num_supported_rf_interfaces = rsp_1->num_supported_rf_interfaces;
if (ndev->num_supported_rf_interfaces >
NCI_MAX_SUPPORTED_RF_INTERFACES) {
NCI_MAX_SUPPORTED_RF_INTERFACES) {
ndev->num_supported_rf_interfaces =
NCI_MAX_SUPPORTED_RF_INTERFACES;
}
@ -73,20 +74,26 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
rsp_1->supported_rf_interfaces,
ndev->num_supported_rf_interfaces);
rsp_2 = (void *) (skb->data + 6 + ndev->num_supported_rf_interfaces);
rsp_2 = (void *) (skb->data + 6 + rsp_1->num_supported_rf_interfaces);
ndev->max_logical_connections =
rsp_2->max_logical_connections;
ndev->max_routing_table_size =
__le16_to_cpu(rsp_2->max_routing_table_size);
ndev->max_control_packet_payload_length =
rsp_2->max_control_packet_payload_length;
ndev->rf_sending_buffer_size =
__le16_to_cpu(rsp_2->rf_sending_buffer_size);
ndev->rf_receiving_buffer_size =
__le16_to_cpu(rsp_2->rf_receiving_buffer_size);
ndev->manufacturer_id =
__le16_to_cpu(rsp_2->manufacturer_id);
ndev->max_ctrl_pkt_payload_len =
rsp_2->max_ctrl_pkt_payload_len;
ndev->max_size_for_large_params =
__le16_to_cpu(rsp_2->max_size_for_large_params);
ndev->max_data_pkt_payload_size =
rsp_2->max_data_pkt_payload_size;
ndev->initial_num_credits =
rsp_2->initial_num_credits;
ndev->manufact_id =
rsp_2->manufact_id;
ndev->manufact_specific_info =
__le32_to_cpu(rsp_2->manufact_specific_info);
atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
nfc_dbg("nfcc_features 0x%x",
ndev->nfcc_features);
@ -104,39 +111,23 @@ static void nci_core_init_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
ndev->max_logical_connections);
nfc_dbg("max_routing_table_size %d",
ndev->max_routing_table_size);
nfc_dbg("max_control_packet_payload_length %d",
ndev->max_control_packet_payload_length);
nfc_dbg("rf_sending_buffer_size %d",
ndev->rf_sending_buffer_size);
nfc_dbg("rf_receiving_buffer_size %d",
ndev->rf_receiving_buffer_size);
nfc_dbg("manufacturer_id 0x%x",
ndev->manufacturer_id);
nfc_dbg("max_ctrl_pkt_payload_len %d",
ndev->max_ctrl_pkt_payload_len);
nfc_dbg("max_size_for_large_params %d",
ndev->max_size_for_large_params);
nfc_dbg("max_data_pkt_payload_size %d",
ndev->max_data_pkt_payload_size);
nfc_dbg("initial_num_credits %d",
ndev->initial_num_credits);
nfc_dbg("manufact_id 0x%x",
ndev->manufact_id);
nfc_dbg("manufact_specific_info 0x%x",
ndev->manufact_specific_info);
exit:
nci_req_complete(ndev, rsp_1->status);
}
static void nci_core_conn_create_rsp_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct nci_core_conn_create_rsp *rsp = (void *) skb->data;
nfc_dbg("entry, status 0x%x", rsp->status);
if (rsp->status != NCI_STATUS_OK)
return;
ndev->max_pkt_payload_size = rsp->max_pkt_payload_size;
ndev->initial_num_credits = rsp->initial_num_credits;
ndev->conn_id = rsp->conn_id;
atomic_set(&ndev->credits_cnt, ndev->initial_num_credits);
nfc_dbg("max_pkt_payload_size %d", ndev->max_pkt_payload_size);
nfc_dbg("initial_num_credits %d", ndev->initial_num_credits);
nfc_dbg("conn_id %d", ndev->conn_id);
}
static void nci_rf_disc_map_rsp_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
@ -196,10 +187,6 @@ void nci_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb)
nci_core_init_rsp_packet(ndev, skb);
break;
case NCI_OP_CORE_CONN_CREATE_RSP:
nci_core_conn_create_rsp_packet(ndev, skb);
break;
case NCI_OP_RF_DISCOVER_MAP_RSP:
nci_rf_disc_map_rsp_packet(ndev, skb);
break;

Some files were not shown because too many files have changed in this diff Show More