mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (145 commits)
  bnx2x: use pci_pcie_cap()
  bnx2x: fix bnx2x_stop_on_error flow in bnx2x_sp_rtnl_task
  bnx2x: enable internal target-read for 57712 and up only
  bnx2x: count statistic ramrods on EQ to prevent MC assert
  bnx2x: fix loopback for non 10G link
  bnx2x: dcb - send all unmapped priorities to same COS as L2
  iwlwifi: Fix build with CONFIG_PM disabled.
  gre: fix improper error handling
  ipv4: use RT_TOS after some rt_tos conversions
  via-velocity: remove duplicated #include
  qlge: remove duplicated #include
  igb: remove duplicated #include
  can: c_can: remove duplicated #include
  bnad: remove duplicated #include
  net: allow netif_carrier to be called safely from IRQ
  bna: Header File Consolidation
  bna: HW Error Counter Fix
  bna: Add HW Semaphore Unlock Logic
  bna: IOC Event Name Change
  bna: Mboxq Flush When IOC Disabled
  ...
commit ee05eff6f7
@@ -4106,6 +4106,12 @@ S: Maintained
F: drivers/net/mv643xx_eth.*
F: include/linux/mv643xx.h

MARVELL MWIFIEX WIRELESS DRIVER
M: Bing Zhao <bzhao@marvell.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/mwifiex/

MARVELL MWL8K WIRELESS DRIVER
M: Lennert Buytenhek <buytenh@wantstofly.org>
L: linux-wireless@vger.kernel.org

@@ -6972,9 +6978,9 @@ S: Maintained
F: drivers/input/misc/wistron_btns.c

WL1251 WIRELESS DRIVER
M: Kalle Valo <kvalo@adurom.com>
M: Luciano Coelho <coelho@ti.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/wl1251
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/wl1251/*

@@ -50,3 +50,75 @@ int bcma_core_enable(struct bcma_device *core, u32 flags)
return 0;
}
EXPORT_SYMBOL_GPL(bcma_core_enable);

void bcma_core_set_clockmode(struct bcma_device *core,
enum bcma_clkmode clkmode)
{
u16 i;

WARN_ON(core->id.id != BCMA_CORE_CHIPCOMMON &&
core->id.id != BCMA_CORE_PCIE &&
core->id.id != BCMA_CORE_80211);

switch (clkmode) {
case BCMA_CLKMODE_FAST:
bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
udelay(64);
for (i = 0; i < 1500; i++) {
if (bcma_read32(core, BCMA_CLKCTLST) &
BCMA_CLKCTLST_HAVEHT) {
i = 0;
break;
}
udelay(10);
}
if (i)
pr_err("HT force timeout\n");
break;
case BCMA_CLKMODE_DYNAMIC:
pr_warn("Dynamic clockmode not supported yet!\n");
break;
}
}
EXPORT_SYMBOL_GPL(bcma_core_set_clockmode);

void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
{
u16 i;

WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
WARN_ON(status & ~BCMA_CLKCTLST_EXTRESST);

if (on) {
bcma_set32(core, BCMA_CLKCTLST, req);
for (i = 0; i < 10000; i++) {
if ((bcma_read32(core, BCMA_CLKCTLST) & status) ==
status) {
i = 0;
break;
}
udelay(10);
}
if (i)
pr_err("PLL enable timeout\n");
} else {
pr_warn("Disabling PLL not supported yet!\n");
}
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);

u32 bcma_core_dma_translation(struct bcma_device *core)
{
switch (core->bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
return BCMA_DMA_TRANSLATION_DMA64_CMT;
else
return BCMA_DMA_TRANSLATION_DMA32_CMT;
default:
pr_err("DMA translation unknown for host %d\n",
core->bus->hosttype);
}
return BCMA_DMA_TRANSLATION_NONE;
}
EXPORT_SYMBOL(bcma_core_dma_translation);

@@ -23,6 +23,9 @@ static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,

void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
{
u32 leddc_on = 10;
u32 leddc_off = 90;

if (cc->core->id.rev >= 11)
cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);

@@ -38,6 +41,17 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
bcma_pmu_init(cc);
if (cc->capabilities & BCMA_CC_CAP_PCTL)
pr_err("Power control not implemented!\n");

if (cc->core->id.rev >= 16) {
if (cc->core->bus->sprom.leddc_on_time &&
cc->core->bus->sprom.leddc_off_time) {
leddc_on = cc->core->bus->sprom.leddc_on_time;
leddc_off = cc->core->bus->sprom.leddc_off_time;
}
bcma_cc_write32(cc, BCMA_CC_GPIOTIMER,
((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
(leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
}
}

/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */

@@ -172,8 +172,10 @@ static bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
chipid_top != 0x5300)
return false;

#ifdef CONFIG_SSB_DRIVER_PCICORE
if (bus->sprom.boardflags_lo & SSB_PCICORE_BFL_NOPCI)
return false;
#endif /* CONFIG_SSB_DRIVER_PCICORE */

#if 0
/* TODO: on BCMA we use address from EROM instead of magic formula */

@@ -20,12 +20,12 @@
* R/W ops.
**************************************************/

static void bcma_sprom_read(struct bcma_bus *bus, u16 *sprom)
static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom)
{
int i;
for (i = 0; i < SSB_SPROMSIZE_WORDS_R4; i++)
sprom[i] = bcma_read16(bus->drv_cc.core,
BCMA_CC_SPROM + (i * 2));
offset + (i * 2));
}

/**************************************************

@@ -112,7 +112,7 @@ static int bcma_sprom_valid(const u16 *sprom)
return err;

revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] & SSB_SPROM_REVISION_REV;
if (revision != 8) {
if (revision != 8 && revision != 9) {
pr_err("Unsupported SPROM revision: %d\n", revision);
return -ENOENT;
}

@@ -137,6 +137,7 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)

int bcma_sprom_get(struct bcma_bus *bus)
{
u16 offset;
u16 *sprom;
int err = 0;

@@ -151,7 +152,12 @@ int bcma_sprom_get(struct bcma_bus *bus)
if (!sprom)
return -ENOMEM;

bcma_sprom_read(bus, sprom);
/* Most cards have SPROM moved by additional offset 0x30 (48 dwords).
* According to brcm80211 this applies to cards with PCIe rev >= 6
* TODO: understand this condition and use it */
offset = (bus->chipinfo.id == 0x4331) ? BCMA_CC_SPROM :
BCMA_CC_SPROM_PCIE6;
bcma_sprom_read(bus, offset, sprom);

err = bcma_sprom_valid(sprom);
if (err)

@@ -223,26 +223,31 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
}

/**
* bfa_cee_hbfail()
* bfa_cee_notify()
*
* @brief CEE module heart-beat failure handler.
* @brief CEE module IOC event handler.
*
* @param[in] Pointer to the CEE module data structure.
* @param[in] IOC event type
*
* @return void
*/

static void
bfa_cee_hbfail(void *arg)
bfa_cee_notify(void *arg, enum bfa_ioc_event event)
{
struct bfa_cee *cee;
cee = arg;
cee = (struct bfa_cee *) arg;

switch (event) {
case BFA_IOC_E_DISABLED:
case BFA_IOC_E_FAILED:
if (cee->get_attr_pending == true) {
cee->get_attr_status = BFA_STATUS_FAILED;
cee->get_attr_pending = false;
if (cee->cbfn.get_attr_cbfn) {
cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
cee->cbfn.get_attr_cbfn(
cee->cbfn.get_attr_cbarg,
BFA_STATUS_FAILED);
}
}

@@ -250,7 +255,8 @@ bfa_cee_hbfail(void *arg)
cee->get_stats_status = BFA_STATUS_FAILED;
cee->get_stats_pending = false;
if (cee->cbfn.get_stats_cbfn) {
cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
cee->cbfn.get_stats_cbfn(
cee->cbfn.get_stats_cbarg,
BFA_STATUS_FAILED);
}
}

@@ -258,10 +264,16 @@ bfa_cee_hbfail(void *arg)
cee->reset_stats_status = BFA_STATUS_FAILED;
cee->reset_stats_pending = false;
if (cee->cbfn.reset_stats_cbfn) {
cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
cee->cbfn.reset_stats_cbfn(
cee->cbfn.reset_stats_cbarg,
BFA_STATUS_FAILED);
}
}
break;

default:
break;
}
}

/**

@@ -286,6 +298,7 @@ bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
cee->ioc = ioc;

bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
bfa_q_qe_init(&cee->ioc_notify);
bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
}

@@ -25,7 +25,6 @@
typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);

struct bfa_cee_cbfn {
bfa_cee_get_attr_cbfn_t get_attr_cbfn;

@@ -45,7 +44,7 @@ struct bfa_cee {
enum bfa_status get_stats_status;
enum bfa_status reset_stats_status;
struct bfa_cee_cbfn cbfn;
struct bfa_ioc_hbfail_notify hbfail;
struct bfa_ioc_notify ioc_notify;
struct bfa_cee_attr *attr;
struct bfa_cee_stats *stats;
struct bfa_dma attr_dma;

@@ -11,20 +11,24 @@
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/

/**
* @file bfasm.h State machine defines
* @file bfa_cs.h BFA common services
*/

#ifndef __BFA_SM_H__
#define __BFA_SM_H__
#ifndef __BFA_CS_H__
#define __BFA_CS_H__

#include "cna.h"

/**
* @ BFA state machine interfaces
*/

typedef void (*bfa_sm_t)(void *sm, int event);

/**

@@ -85,4 +89,52 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
i++;
return smt[i].state;
}
#endif

/**
* @ Generic wait counter.
*/

typedef void (*bfa_wc_resume_t) (void *cbarg);

struct bfa_wc {
bfa_wc_resume_t wc_resume;
void *wc_cbarg;
int wc_count;
};

static inline void
bfa_wc_up(struct bfa_wc *wc)
{
wc->wc_count++;
}

static inline void
bfa_wc_down(struct bfa_wc *wc)
{
wc->wc_count--;
if (wc->wc_count == 0)
wc->wc_resume(wc->wc_cbarg);
}

/**
* Initialize a waiting counter.
*/
static inline void
bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
{
wc->wc_resume = wc_resume;
wc->wc_cbarg = wc_cbarg;
wc->wc_count = 0;
bfa_wc_up(wc);
}

/**
* Wait for counter to reach zero
*/
static inline void
bfa_wc_wait(struct bfa_wc *wc)
{
bfa_wc_down(wc);
}

#endif /* __BFA_CS_H__ */

@@ -153,6 +153,7 @@ struct bfa_ioc_drv_stats {
u32 enable_reqs;
u32 disable_replies;
u32 enable_replies;
u32 rsvd;
};

/**

@ -58,6 +58,7 @@ static bool bfa_nw_auto_recover = true;
|
|||
/*
|
||||
* forward declarations
|
||||
*/
|
||||
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
|
||||
|
@ -68,9 +69,10 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
|
|||
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
|
||||
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_recover(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
|
||||
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
|
||||
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
|
||||
|
@ -107,7 +109,7 @@ enum ioc_event {
|
|||
IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
|
||||
IOC_E_DISABLED = 7, /*!< f/w disabled */
|
||||
IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
|
||||
IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */
|
||||
IOC_E_PFFAILED = 9, /*!< failure notice by iocpf sm */
|
||||
IOC_E_HBFAIL = 10, /*!< heartbeat failure */
|
||||
IOC_E_HWERROR = 11, /*!< hardware error interrupt */
|
||||
IOC_E_TIMEOUT = 12, /*!< timeout */
|
||||
|
@ -239,7 +241,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -272,7 +274,7 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -294,12 +296,12 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
|
||||
break;
|
||||
|
||||
case IOC_E_PFAILED:
|
||||
case IOC_E_PFFAILED:
|
||||
/* !!! fall through !!! */
|
||||
case IOC_E_HWERROR:
|
||||
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
|
||||
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
|
||||
if (event != IOC_E_PFAILED)
|
||||
if (event != IOC_E_PFFAILED)
|
||||
bfa_iocpf_initfail(ioc);
|
||||
break;
|
||||
|
||||
|
@ -316,7 +318,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -344,14 +346,14 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
|
||||
break;
|
||||
|
||||
case IOC_E_PFAILED:
|
||||
case IOC_E_PFFAILED:
|
||||
case IOC_E_HWERROR:
|
||||
del_timer(&ioc->ioc_timer);
|
||||
/* fall through */
|
||||
case IOC_E_TIMEOUT:
|
||||
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
|
||||
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
|
||||
if (event != IOC_E_PFAILED)
|
||||
if (event != IOC_E_PFFAILED)
|
||||
bfa_iocpf_getattrfail(ioc);
|
||||
break;
|
||||
|
||||
|
@ -364,7 +366,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -387,7 +389,7 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
|
||||
break;
|
||||
|
||||
case IOC_E_PFAILED:
|
||||
case IOC_E_PFFAILED:
|
||||
case IOC_E_HWERROR:
|
||||
bfa_ioc_hb_stop(ioc);
|
||||
/* !!! fall through !!! */
|
||||
|
@ -398,12 +400,12 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
else
|
||||
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
|
||||
|
||||
if (event != IOC_E_PFAILED)
|
||||
if (event != IOC_E_PFFAILED)
|
||||
bfa_iocpf_fail(ioc);
|
||||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -434,7 +436,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -465,7 +467,7 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -485,13 +487,13 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
|
||||
break;
|
||||
|
||||
case IOC_E_PFAILED:
|
||||
case IOC_E_PFFAILED:
|
||||
case IOC_E_HWERROR:
|
||||
/**
|
||||
* Initialization retry failed.
|
||||
*/
|
||||
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
|
||||
if (event != IOC_E_PFAILED)
|
||||
if (event != IOC_E_PFFAILED)
|
||||
bfa_iocpf_initfail(ioc);
|
||||
break;
|
||||
|
||||
|
@ -512,7 +514,7 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -546,7 +548,7 @@ bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -579,7 +581,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(iocpf->ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -589,6 +591,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
static void
|
||||
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
|
||||
{
|
||||
bfa_ioc_hw_sem_init(iocpf->ioc);
|
||||
bfa_ioc_hw_sem_get(iocpf->ioc);
|
||||
}
|
||||
|
||||
|
@ -631,7 +634,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -675,7 +678,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -714,7 +717,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -762,7 +765,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -813,7 +816,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -856,7 +859,7 @@ bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -898,7 +901,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -927,7 +930,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -937,6 +940,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
static void
|
||||
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
|
||||
{
|
||||
bfa_ioc_mbox_flush(iocpf->ioc);
|
||||
bfa_ioc_pf_disabled(iocpf->ioc);
|
||||
}
|
||||
|
||||
|
@ -957,7 +961,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1009,7 +1013,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1038,7 +1042,7 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1053,7 +1057,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
|
|||
/**
|
||||
* Flush any queued up mailbox requests.
|
||||
*/
|
||||
bfa_ioc_mbox_hbfail(iocpf->ioc);
|
||||
bfa_ioc_mbox_flush(iocpf->ioc);
|
||||
bfa_ioc_hw_sem_get(iocpf->ioc);
|
||||
}
|
||||
|
||||
|
@ -1093,7 +1097,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1115,7 +1119,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(iocpf->ioc, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1123,21 +1127,26 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
|
|||
* BFA IOC private functions
|
||||
*/
|
||||
|
||||
/**
|
||||
* Notify common modules registered for notification.
|
||||
*/
|
||||
static void
|
||||
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
|
||||
{
|
||||
struct bfa_ioc_notify *notify;
|
||||
struct list_head *qe;
|
||||
|
||||
list_for_each(qe, &ioc->notify_q) {
|
||||
notify = (struct bfa_ioc_notify *)qe;
|
||||
notify->cbfn(notify->cbarg, event);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
|
||||
{
|
||||
struct list_head *qe;
|
||||
struct bfa_ioc_hbfail_notify *notify;
|
||||
|
||||
ioc->cbfn->disable_cbfn(ioc->bfa);
|
||||
|
||||
/**
|
||||
* Notify common modules registered for notification.
|
||||
*/
|
||||
list_for_each(qe, &ioc->hb_notify_q) {
|
||||
notify = (struct bfa_ioc_hbfail_notify *) qe;
|
||||
notify->cbfn(notify->cbarg);
|
||||
}
|
||||
bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
|
||||
}
|
||||
|
||||
bool
|
||||
|
@ -1168,6 +1177,29 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
|
|||
writel(1, sem_reg);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
|
||||
{
|
||||
struct bfi_ioc_image_hdr fwhdr;
|
||||
u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
|
||||
|
||||
if (fwstate == BFI_IOC_UNINIT)
|
||||
return;
|
||||
|
||||
bfa_nw_ioc_fwver_get(ioc, &fwhdr);
|
||||
|
||||
if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
|
||||
return;
|
||||
|
||||
writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
|
||||
|
||||
/*
|
||||
* Try to lock and then unlock the semaphore.
|
||||
*/
|
||||
readl(ioc->ioc_regs.ioc_sem_reg);
|
||||
writel(1, ioc->ioc_regs.ioc_sem_reg);
|
||||
}
|
||||
|
||||
static void
|
||||
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
|
||||
{
|
||||
|
@ -1638,7 +1670,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
|
|||
* Cleanup any pending requests.
|
||||
*/
|
||||
static void
|
||||
bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
|
||||
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
|
||||
{
|
||||
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
|
||||
struct bfa_mbox_cmd *cmd;
|
||||
|
@ -1650,17 +1682,11 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
|
|||
static void
|
||||
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
|
||||
{
|
||||
struct list_head *qe;
|
||||
struct bfa_ioc_hbfail_notify *notify;
|
||||
|
||||
/**
|
||||
* Notify driver and common modules registered for notification.
|
||||
*/
|
||||
ioc->cbfn->hbfail_cbfn(ioc->bfa);
|
||||
list_for_each(qe, &ioc->hb_notify_q) {
|
||||
notify = (struct bfa_ioc_hbfail_notify *) qe;
|
||||
notify->cbfn(notify->cbarg);
|
||||
}
|
||||
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1684,7 +1710,7 @@ bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
|
|||
static void
|
||||
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
|
||||
{
|
||||
bfa_fsm_send_event(ioc, IOC_E_PFAILED);
|
||||
bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -1839,7 +1865,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
|
|||
ioc->iocpf.ioc = ioc;
|
||||
|
||||
bfa_ioc_mbox_attach(ioc);
|
||||
INIT_LIST_HEAD(&ioc->hb_notify_q);
|
||||
INIT_LIST_HEAD(&ioc->notify_q);
|
||||
|
||||
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
|
||||
bfa_fsm_send_event(ioc, IOC_E_RESET);
|
||||
|
@ -1969,6 +1995,8 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
|
|||
* mailbox is free -- queue command to firmware
|
||||
*/
|
||||
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -2001,18 +2029,30 @@ bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
|
|||
void
|
||||
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
|
||||
{
|
||||
bfa_ioc_stats(ioc, ioc_hbfails);
|
||||
bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
|
||||
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
|
||||
}
|
||||
|
||||
/**
|
||||
* return true if IOC is disabled
|
||||
*/
|
||||
bool
|
||||
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
|
||||
{
|
||||
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
|
||||
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
|
||||
}
|
||||
|
||||
/**
|
||||
* Add to IOC heartbeat failure notification queue. To be used by common
|
||||
* modules such as cee, port, diag.
|
||||
*/
|
||||
void
|
||||
bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
|
||||
struct bfa_ioc_hbfail_notify *notify)
|
||||
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
|
||||
struct bfa_ioc_notify *notify)
|
||||
{
|
||||
list_add_tail(¬ify->qe, &ioc->hb_notify_q);
|
||||
list_add_tail(¬ify->qe, &ioc->notify_q);
|
||||
}
|
||||
|
||||
#define BFA_MFG_NAME "Brocade"
|
||||
|
@ -2217,6 +2257,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
|
|||
{
|
||||
pr_crit("Heart Beat of IOC has failed\n");
|
||||
bfa_ioc_stats(ioc, ioc_hbfails);
|
||||
bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
|
||||
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
|
||||
}
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
#ifndef __BFA_IOC_H__
|
||||
#define __BFA_IOC_H__
|
||||
|
||||
#include "bfa_sm.h"
|
||||
#include "bfa_cs.h"
|
||||
#include "bfi.h"
|
||||
#include "cna.h"
|
||||
|
||||
|
@ -97,8 +97,11 @@ struct bfa_ioc_regs {
|
|||
/**
|
||||
* IOC Mailbox structures
|
||||
*/
|
||||
typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
|
||||
struct bfa_mbox_cmd {
|
||||
struct list_head qe;
|
||||
bfa_mbox_cmd_cbfn_t cbfn;
|
||||
void *cbarg;
|
||||
u32 msg[BFI_IOC_MSGSZ];
|
||||
};
|
||||
|
||||
|
@ -129,6 +132,23 @@ struct bfa_ioc_cbfn {
|
|||
bfa_ioc_reset_cbfn_t reset_cbfn;
|
||||
};
|
||||
|
||||
/**
|
||||
* IOC event notification mechanism.
|
||||
*/
|
||||
enum bfa_ioc_event {
|
||||
BFA_IOC_E_ENABLED = 1,
|
||||
BFA_IOC_E_DISABLED = 2,
|
||||
BFA_IOC_E_FAILED = 3,
|
||||
};
|
||||
|
||||
typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event);
|
||||
|
||||
struct bfa_ioc_notify {
|
||||
struct list_head qe;
|
||||
bfa_ioc_notify_cbfn_t cbfn;
|
||||
void *cbarg;
|
||||
};
|
||||
|
||||
/**
|
||||
* Heartbeat failure notification queue element.
|
||||
*/
|
||||
|
@ -141,7 +161,7 @@ struct bfa_ioc_hbfail_notify {
|
|||
/**
|
||||
* Initialize a heartbeat failure notification structure
|
||||
*/
|
||||
#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
|
||||
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
|
||||
(__notify)->cbfn = (__cbfn); \
|
||||
(__notify)->cbarg = (__cbarg); \
|
||||
} while (0)
|
||||
|
@ -162,7 +182,7 @@ struct bfa_ioc {
|
|||
struct timer_list sem_timer;
|
||||
struct timer_list hb_timer;
|
||||
u32 hb_count;
|
||||
struct list_head hb_notify_q;
|
||||
struct list_head notify_q;
|
||||
void *dbg_fwsave;
|
||||
int dbg_fwsave_len;
|
||||
bool dbg_fwsave_once;
|
||||
|
@ -217,6 +237,8 @@ struct bfa_ioc_hwif {
|
|||
BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
|
||||
|
||||
#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
|
||||
#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \
|
||||
((_ioc)->stats.hb_count = (_hb_count))
|
||||
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
|
||||
#define BFA_IOC_FWIMG_TYPE(__ioc) \
|
||||
(((__ioc)->ctdev) ? \
|
||||
|
@ -263,9 +285,10 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
|
|||
void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
|
||||
|
||||
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
|
||||
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
|
||||
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
|
||||
void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
|
||||
struct bfa_ioc_hbfail_notify *notify);
|
||||
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
|
||||
struct bfa_ioc_notify *notify);
|
||||
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
|
||||
void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
|
||||
void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
|
||||
|
|
|
@ -1,69 +0,0 @@
|
|||
/*
|
||||
* Linux network driver for Brocade Converged Network Adapter.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License (GPL) Version 2 as
|
||||
* published by the Free Software Foundation
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
/*
|
||||
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
|
||||
* All rights reserved
|
||||
* www.brocade.com
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file bfa_wc.h Generic wait counter.
|
||||
*/
|
||||
|
||||
#ifndef __BFA_WC_H__
|
||||
#define __BFA_WC_H__
|
||||
|
||||
typedef void (*bfa_wc_resume_t) (void *cbarg);
|
||||
|
||||
struct bfa_wc {
|
||||
bfa_wc_resume_t wc_resume;
|
||||
void *wc_cbarg;
|
||||
int wc_count;
|
||||
};
|
||||
|
||||
static inline void
|
||||
bfa_wc_up(struct bfa_wc *wc)
|
||||
{
|
||||
wc->wc_count++;
|
||||
}
|
||||
|
||||
static inline void
|
||||
bfa_wc_down(struct bfa_wc *wc)
|
||||
{
|
||||
wc->wc_count--;
|
||||
if (wc->wc_count == 0)
|
||||
wc->wc_resume(wc->wc_cbarg);
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize a waiting counter.
|
||||
*/
|
||||
static inline void
|
||||
bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
|
||||
{
|
||||
wc->wc_resume = wc_resume;
|
||||
wc->wc_cbarg = wc_cbarg;
|
||||
wc->wc_count = 0;
|
||||
bfa_wc_up(wc);
|
||||
}
|
||||
|
||||
/**
|
||||
* Wait for counter to reach zero
|
||||
*/
|
||||
static inline void
|
||||
bfa_wc_wait(struct bfa_wc *wc)
|
||||
{
|
||||
bfa_wc_down(wc);
|
||||
}
|
||||
|
||||
#endif
|
|
@ -289,6 +289,12 @@ struct bfi_ioc_image_hdr {
|
|||
u32 md5sum[BFI_IOC_MD5SUM_SZ];
|
||||
};
|
||||
|
||||
enum bfi_fwboot_type {
|
||||
BFI_FWBOOT_TYPE_NORMAL = 0,
|
||||
BFI_FWBOOT_TYPE_FLASH = 1,
|
||||
BFI_FWBOOT_TYPE_MEMTEST = 2,
|
||||
};
|
||||
|
||||
/**
|
||||
* BFI_IOC_I2H_READY_EVENT message
|
||||
*/
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
#ifndef __BNA_H__
|
||||
#define __BNA_H__
|
||||
|
||||
#include "bfa_wc.h"
|
||||
#include "bfa_cs.h"
|
||||
#include "bfa_ioc.h"
|
||||
#include "cna.h"
|
||||
#include "bfi_ll.h"
|
||||
|
|
|
@ -16,8 +16,7 @@
|
|||
* www.brocade.com
|
||||
*/
|
||||
#include "bna.h"
|
||||
#include "bfa_sm.h"
|
||||
#include "bfa_wc.h"
|
||||
#include "bfa_cs.h"
|
||||
|
||||
static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
|
||||
|
||||
|
@ -380,7 +379,7 @@ bna_llport_sm_stopped(struct bna_llport *llport,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(llport->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -409,7 +408,7 @@ bna_llport_sm_down(struct bna_llport *llport,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(llport->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -455,7 +454,7 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(llport->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -497,7 +496,7 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(llport->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -526,7 +525,7 @@ bna_llport_sm_up(struct bna_llport *llport,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(llport->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -563,7 +562,7 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(llport->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -916,7 +915,7 @@ bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -956,7 +955,7 @@ bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1001,7 +1000,7 @@ bna_port_sm_pause_init_wait(struct bna_port *port,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1022,7 +1021,7 @@ bna_port_sm_last_resp_wait(struct bna_port *port,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1061,7 +1060,7 @@ bna_port_sm_started(struct bna_port *port,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1086,7 +1085,7 @@ bna_port_sm_pause_cfg_wait(struct bna_port *port,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1111,7 +1110,7 @@ bna_port_sm_rx_stop_wait(struct bna_port *port,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1136,7 +1135,7 @@ bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1161,7 +1160,7 @@ bna_port_sm_chld_stop_wait(struct bna_port *port,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(port->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1472,7 +1471,7 @@ bna_device_sm_stopped(struct bna_device *device,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(device->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1512,7 +1511,7 @@ bna_device_sm_ioc_ready_wait(struct bna_device *device,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(device->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1542,7 +1541,7 @@ bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(device->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1568,7 +1567,7 @@ bna_device_sm_port_stop_wait(struct bna_device *device,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(device->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1589,7 +1588,7 @@ bna_device_sm_ioc_disable_wait(struct bna_device *device,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(device->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1622,7 +1621,7 @@ bna_device_sm_failed(struct bna_device *device,
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(device->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
* www.brocade.com
|
||||
*/
|
||||
#include "bna.h"
|
||||
#include "bfa_sm.h"
|
||||
#include "bfa_cs.h"
|
||||
#include "bfi.h"
|
||||
|
||||
/**
|
||||
|
@ -569,7 +569,7 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -627,7 +627,7 @@ bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -678,7 +678,7 @@ bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -724,7 +724,7 @@ bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -761,7 +761,7 @@ bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -815,7 +815,7 @@ bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -851,7 +851,7 @@ bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
* any other event during these states
|
||||
*/
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -887,7 +887,7 @@ bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
* any other event during these states
|
||||
*/
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -907,7 +907,7 @@ bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(rxf->rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1898,7 +1898,7 @@ static void bna_rx_sm_stopped(struct bna_rx *rx,
|
|||
/* no-op */
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1946,7 +1946,7 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
|
|||
bfa_fsm_set_state(rx, bna_rx_sm_started);
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -1981,7 +1981,7 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
|
|||
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -2011,7 +2011,7 @@ bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
|
|||
bna_rxf_fail(&rx->rxf);
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2064,7 +2064,7 @@ bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
|
|||
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
|
||||
break;
|
||||
default:
|
||||
bfa_sm_fault(rx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -3216,7 +3216,7 @@ bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(tx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3261,7 +3261,7 @@ bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(tx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3294,7 +3294,7 @@ bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(tx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3335,7 +3335,7 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(tx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3355,7 +3355,7 @@ bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
|
|||
break;
|
||||
|
||||
default:
|
||||
bfa_sm_fault(tx->bna, event);
|
||||
bfa_sm_fault(event);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -25,7 +25,6 @@
|
|||
#include <linux/if_ether.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/if_vlan.h>
|
||||
|
||||
#include "bnad.h"
|
||||
#include "bna.h"
|
||||
|
@ -60,7 +59,7 @@ static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
|
|||
|
||||
#define BNAD_GET_MBOX_IRQ(_bnad) \
|
||||
(((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
|
||||
((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
|
||||
((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
|
||||
((_bnad)->pcidev->irq))
|
||||
|
||||
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
|
||||
|
@ -1116,17 +1115,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
|
|||
spin_lock_irqsave(&bnad->bna_lock, flags);
|
||||
if (bnad->cfg_flags & BNAD_CF_MSIX) {
|
||||
irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
|
||||
irq = bnad->msix_table[bnad->msix_num - 1].vector;
|
||||
irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
|
||||
irq_flags = 0;
|
||||
intr_info->intr_type = BNA_INTR_T_MSIX;
|
||||
intr_info->idl[0].vector = bnad->msix_num - 1;
|
||||
intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
|
||||
} else {
|
||||
irq_handler = (irq_handler_t)bnad_isr;
|
||||
irq = bnad->pcidev->irq;
|
||||
irq_flags = IRQF_SHARED;
|
||||
intr_info->intr_type = BNA_INTR_T_INTX;
|
||||
/* intr_info->idl.vector = 0 ? */
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
||||
sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
|
||||
|
||||
|
@ -1179,11 +1178,12 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
|
|||
|
||||
switch (src) {
|
||||
case BNAD_INTR_TX:
|
||||
vector_start = txrx_id;
|
||||
vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
|
||||
break;
|
||||
|
||||
case BNAD_INTR_RX:
|
||||
vector_start = bnad->num_tx * bnad->num_txq_per_tx +
|
||||
vector_start = BNAD_MAILBOX_MSIX_VECTORS +
|
||||
(bnad->num_tx * bnad->num_txq_per_tx) +
|
||||
txrx_id;
|
||||
break;
|
||||
|
||||
|
@ -1204,11 +1204,11 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
|
|||
|
||||
switch (src) {
|
||||
case BNAD_INTR_TX:
|
||||
intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
|
||||
intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
|
||||
break;
|
||||
|
||||
case BNAD_INTR_RX:
|
||||
intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
|
||||
intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -2075,7 +2075,7 @@ bnad_mbox_irq_sync(struct bnad *bnad)
|
|||
|
||||
spin_lock_irqsave(&bnad->bna_lock, flags);
|
||||
if (bnad->cfg_flags & BNAD_CF_MSIX)
|
||||
irq = bnad->msix_table[bnad->msix_num - 1].vector;
|
||||
irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
|
||||
else
|
||||
irq = bnad->pcidev->irq;
|
||||
spin_unlock_irqrestore(&bnad->bna_lock, flags);
|
||||
|
@ -3209,7 +3209,7 @@ bnad_pci_remove(struct pci_dev *pdev)
|
|||
free_netdev(netdev);
|
||||
}
|
||||
|
||||
static const struct pci_device_id bnad_pci_id_table[] = {
|
||||
static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
|
||||
{
|
||||
PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
|
||||
PCI_DEVICE_ID_BROCADE_CT),
|
||||
|
@ -3232,7 +3232,8 @@ bnad_module_init(void)
|
|||
{
|
||||
int err;
|
||||
|
||||
pr_info("Brocade 10G Ethernet driver\n");
|
||||
pr_info("Brocade 10G Ethernet driver - version: %s\n",
|
||||
BNAD_VERSION);
|
||||
|
||||
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
|
||||
|
||||
|
|
|
@ -68,7 +68,10 @@ struct bnad_rx_ctrl {
|
|||
|
||||
#define BNAD_VERSION "2.3.2.3"
|
||||
|
||||
#define BNAD_MAILBOX_MSIX_INDEX 0
|
||||
#define BNAD_MAILBOX_MSIX_VECTORS 1
|
||||
#define BNAD_INTX_TX_IB_BITMASK 0x1
|
||||
#define BNAD_INTX_RX_IB_BITMASK 0x2
|
||||
|
||||
#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
|
||||
#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
|
||||
|
|
|
@ -33,7 +33,7 @@
|
|||
|
||||
#include <linux/list.h>
|
||||
|
||||
#define bfa_sm_fault(__mod, __event) do { \
|
||||
#define bfa_sm_fault(__event) do { \
|
||||
pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
|
||||
__event); \
|
||||
} while (0)
|
||||
|
|
|
@ -19,15 +19,13 @@
|
|||
#include <linux/netdevice.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <net/dcbnl.h>
|
||||
|
||||
#include "bnx2x.h"
|
||||
#include "bnx2x_cmn.h"
|
||||
#include "bnx2x_dcb.h"
|
||||
|
||||
#ifdef BCM_DCBNL
|
||||
#include <linux/rtnetlink.h>
|
||||
#endif
|
||||
|
||||
/* forward declarations of dcbx related functions */
|
||||
static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
|
||||
static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
|
||||
|
@ -333,6 +331,32 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
|
|||
}
|
||||
}
|
||||
|
||||
/* maps unmapped priorities to to the same COS as L2 */
|
||||
static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
|
||||
{
|
||||
int i;
|
||||
u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
|
||||
u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
|
||||
u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
|
||||
struct bnx2x_dcbx_cos_params *cos_params =
|
||||
bp->dcbx_port_params.ets.cos_params;
|
||||
|
||||
/* get unmapped priorities by clearing mapped bits */
|
||||
for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
|
||||
unmapped &= ~(1 << ttp[i]);
|
||||
|
||||
/* find cos for nw prio and extend it with unmapped */
|
||||
for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
|
||||
if (cos_params[i].pri_bitmask & nw_prio) {
|
||||
/* extend the bitmask with unmapped */
|
||||
DP(NETIF_MSG_LINK,
|
||||
"cos %d extended with 0x%08x", i, unmapped);
|
||||
cos_params[i].pri_bitmask |= unmapped;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
|
||||
struct dcbx_features *features,
|
||||
u32 error)
|
||||
|
@ -342,6 +366,8 @@ static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
|
|||
bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
|
||||
|
||||
bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
|
||||
|
||||
bnx2x_dcbx_map_nw(bp);
|
||||
}
|
||||
|
||||
#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
|
||||
|
@ -682,6 +708,8 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
|
|||
if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
|
||||
& (1 << prio)) {
|
||||
bp->prio_to_cos[prio] = cos;
|
||||
DP(NETIF_MSG_LINK,
|
||||
"tx_mapping %d --> %d\n", prio, cos);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -749,7 +777,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
|
|||
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
|
||||
bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
|
||||
#ifdef BCM_DCBNL
|
||||
/**
|
||||
/*
|
||||
* Send a notification for the new negotiated parameters
|
||||
*/
|
||||
dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
|
||||
|
@ -1732,7 +1760,6 @@ static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
|
|||
pri_join_mask,
|
||||
num_of_dif_pri);
|
||||
|
||||
|
||||
for (i = 0; i < cos_data.num_of_cos ; i++) {
|
||||
struct bnx2x_dcbx_cos_params *p =
|
||||
&bp->dcbx_port_params.ets.cos_params[i];
|
||||
|
|
|
@ -1671,11 +1671,12 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
|
|||
|
||||
switch (command) {
|
||||
case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
|
||||
DP(NETIF_MSG_IFUP, "got UPDATE ramrod. CID %d\n", cid);
|
||||
DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
|
||||
drv_cmd = BNX2X_Q_CMD_UPDATE;
|
||||
break;
|
||||
|
||||
case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
|
||||
DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
|
||||
DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
|
||||
drv_cmd = BNX2X_Q_CMD_SETUP;
|
||||
break;
|
||||
|
||||
|
@ -1685,17 +1686,17 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
|
|||
break;
|
||||
|
||||
case (RAMROD_CMD_ID_ETH_HALT):
|
||||
DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
|
||||
DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
|
||||
drv_cmd = BNX2X_Q_CMD_HALT;
|
||||
break;
|
||||
|
||||
case (RAMROD_CMD_ID_ETH_TERMINATE):
|
||||
DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
|
||||
DP(BNX2X_MSG_SP, "got MULTI[%d] teminate ramrod\n", cid);
|
||||
drv_cmd = BNX2X_Q_CMD_TERMINATE;
|
||||
break;
|
||||
|
||||
case (RAMROD_CMD_ID_ETH_EMPTY):
|
||||
DP(NETIF_MSG_IFDOWN, "got MULTI[%d] empty ramrod\n", cid);
|
||||
DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
|
||||
drv_cmd = BNX2X_Q_CMD_EMPTY;
|
||||
break;
|
||||
|
||||
|
@ -1725,6 +1726,8 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
|
|||
/* push the change in bp->spq_left and towards the memory */
|
||||
smp_mb__after_atomic_inc();
|
||||
|
||||
DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -2151,10 +2154,12 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
|
|||
u8 rc;
|
||||
int cfx_idx = bnx2x_get_link_cfg_idx(bp);
|
||||
u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
|
||||
/* Initialize link parameters structure variables */
|
||||
/* It is recommended to turn off RX FC for jumbo frames
|
||||
for better performance */
|
||||
if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
|
||||
/*
|
||||
* Initialize link parameters structure variables
|
||||
* It is recommended to turn off RX FC for jumbo frames
|
||||
* for better performance
|
||||
*/
|
||||
if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
|
||||
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
|
||||
else
|
||||
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
|
||||
|
@ -2162,8 +2167,18 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
|
|||
bnx2x_acquire_phy_lock(bp);
|
||||
|
||||
if (load_mode == LOAD_DIAG) {
|
||||
bp->link_params.loopback_mode = LOOPBACK_XGXS;
|
||||
bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
|
||||
struct link_params *lp = &bp->link_params;
|
||||
lp->loopback_mode = LOOPBACK_XGXS;
|
||||
/* do PHY loopback at 10G speed, if possible */
|
||||
if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
|
||||
if (lp->speed_cap_mask[cfx_idx] &
|
||||
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
|
||||
lp->req_line_speed[cfx_idx] =
|
||||
SPEED_10000;
|
||||
else
|
||||
lp->req_line_speed[cfx_idx] =
|
||||
SPEED_1000;
|
||||
}
|
||||
}
|
||||
|
||||
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
|
||||
|
@ -3077,8 +3092,6 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
|
|||
spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
|
||||
spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
|
||||
|
||||
/* stats ramrod has it's own slot on the spq */
|
||||
if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
|
||||
/*
|
||||
* It's ok if the actual decrement is issued towards the memory
|
||||
* somewhere between the spin_lock and spin_unlock. Thus no
|
||||
|
@ -3088,15 +3101,14 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
|
|||
atomic_dec(&bp->eq_spq_left);
|
||||
else
|
||||
atomic_dec(&bp->cq_spq_left);
|
||||
}
|
||||
|
||||
|
||||
DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
|
||||
"SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
|
||||
"type(0x%x) left (ETH, COMMON) (%x,%x)\n",
|
||||
"SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) "
|
||||
"type(0x%x) left (CQ, EQ) (%x,%x)\n",
|
||||
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
|
||||
(u32)(U64_LO(bp->spq_mapping) +
|
||||
(void *)bp->spq_prod_bd - (void *)bp->spq), command,
|
||||
(void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
|
||||
HW_CID(bp, cid), data_hi, data_lo, type,
|
||||
atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
|
||||
|
||||
|
@ -3453,6 +3465,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
|
|||
} else if (attn & BNX2X_MC_ASSERT_BITS) {
|
||||
|
||||
BNX2X_ERR("MC assert!\n");
|
||||
bnx2x_mc_assert(bp);
|
||||
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
|
||||
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
|
||||
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
|
||||
|
@ -4412,7 +4425,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
sw_cons = bp->eq_cons;
|
||||
sw_prod = bp->eq_prod;
|
||||
|
||||
DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->cq_spq_left %u\n",
|
||||
DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
|
||||
hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
|
||||
|
||||
for (; sw_cons != hw_cons;
|
||||
|
@ -4431,7 +4444,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
|
||||
bp->stats_comp++);
|
||||
/* nothing to do with stats comp */
|
||||
continue;
|
||||
goto next_spqe;
|
||||
|
||||
case EVENT_RING_OPCODE_CFC_DEL:
|
||||
/* handle according to cid range */
|
||||
|
@ -4439,7 +4452,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
* we may want to verify here that the bp state is
|
||||
* HALTING
|
||||
*/
|
||||
DP(NETIF_MSG_IFDOWN,
|
||||
DP(BNX2X_MSG_SP,
|
||||
"got delete ramrod for MULTI[%d]\n", cid);
|
||||
#ifdef BCM_CNIC
|
||||
if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
|
||||
|
@ -4455,7 +4468,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
goto next_spqe;
|
||||
|
||||
case EVENT_RING_OPCODE_STOP_TRAFFIC:
|
||||
DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
|
||||
DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
|
||||
if (f_obj->complete_cmd(bp, f_obj,
|
||||
BNX2X_F_CMD_TX_STOP))
|
||||
break;
|
||||
|
@ -4463,21 +4476,21 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
goto next_spqe;
|
||||
|
||||
case EVENT_RING_OPCODE_START_TRAFFIC:
|
||||
DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
|
||||
DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
|
||||
if (f_obj->complete_cmd(bp, f_obj,
|
||||
BNX2X_F_CMD_TX_START))
|
||||
break;
|
||||
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
|
||||
goto next_spqe;
|
||||
case EVENT_RING_OPCODE_FUNCTION_START:
|
||||
DP(NETIF_MSG_IFUP, "got FUNC_START ramrod\n");
|
||||
DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
|
||||
if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
|
||||
break;
|
||||
|
||||
goto next_spqe;
|
||||
|
||||
case EVENT_RING_OPCODE_FUNCTION_STOP:
|
||||
DP(NETIF_MSG_IFDOWN, "got FUNC_STOP ramrod\n");
|
||||
DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
|
||||
if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
|
||||
break;
|
||||
|
||||
|
@ -4491,7 +4504,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
BNX2X_STATE_OPENING_WAIT4_PORT):
|
||||
cid = elem->message.data.eth_event.echo &
|
||||
BNX2X_SWCID_MASK;
|
||||
DP(NETIF_MSG_IFUP, "got RSS_UPDATE ramrod. CID %d\n",
|
||||
DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
|
||||
cid);
|
||||
rss_raw->clear_pending(rss_raw);
|
||||
break;
|
||||
|
@ -4506,7 +4519,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
BNX2X_STATE_DIAG):
|
||||
case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
|
||||
BNX2X_STATE_CLOSING_WAIT4_HALT):
|
||||
DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
|
||||
DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
|
||||
bnx2x_handle_classification_eqe(bp, elem);
|
||||
break;
|
||||
|
||||
|
@ -4516,7 +4529,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
BNX2X_STATE_DIAG):
|
||||
case (EVENT_RING_OPCODE_MULTICAST_RULES |
|
||||
BNX2X_STATE_CLOSING_WAIT4_HALT):
|
||||
DP(NETIF_MSG_IFUP, "got mcast ramrod\n");
|
||||
DP(BNX2X_MSG_SP, "got mcast ramrod\n");
|
||||
bnx2x_handle_mcast_eqe(bp);
|
||||
break;
|
||||
|
||||
|
@ -4526,7 +4539,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
|
|||
BNX2X_STATE_DIAG):
|
||||
case (EVENT_RING_OPCODE_FILTERS_RULES |
|
||||
BNX2X_STATE_CLOSING_WAIT4_HALT):
|
||||
DP(NETIF_MSG_IFUP, "got rx_mode ramrod\n");
|
||||
DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
|
||||
bnx2x_handle_rx_mode_eqe(bp);
|
||||
break;
|
||||
default:
|
||||
|
@ -5639,7 +5652,7 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
|
|||
int r_order, w_order;
|
||||
|
||||
pci_read_config_word(bp->pdev,
|
||||
bp->pdev->pcie_cap + PCI_EXP_DEVCTL, &devctl);
|
||||
pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
|
||||
DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
|
||||
w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
|
||||
if (bp->mrrs == -1)
|
||||
|
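The hunk above swaps the direct bp->pdev->pcie_cap field access for the pci_pcie_cap() helper. A minimal sketch of that pattern, assuming only a generic PCIe device; the read_devctl() wrapper is illustrative and not part of the driver:

#include <linux/pci.h>

/* Sketch: locate the PCIe capability with pci_pcie_cap() and read the
 * Device Control register. pci_pcie_cap() returns 0 when the device has
 * no PCIe capability, so the config read is guarded. */
static u16 read_devctl(struct pci_dev *pdev)
{
	int pos = pci_pcie_cap(pdev);
	u16 devctl = 0;

	if (pos)
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &devctl);
	return devctl;
}
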
@ -8400,31 +8413,45 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
if (!netif_running(bp->dev))
goto sp_rtnl_exit;
if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
"so reset not done to allow debug dump,\n"
"you will need to reboot when done\n");
goto sp_rtnl_exit;
goto sp_rtnl_not_reset;
#endif
if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
/*
* Clear TX_TIMEOUT bit as we are going to reset the function
* anyway.
* Clear all pending SP commands as we are going to reset the
* function anyway.
*/
smp_mb__before_clear_bit();
clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
smp_mb__after_clear_bit();
bp->sp_rtnl_state = 0;
smp_mb();
bnx2x_parity_recover(bp);
} else if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT,
&bp->sp_rtnl_state)){
goto sp_rtnl_exit;
}
if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
/*
* Clear all pending SP commands as we are going to reset the
* function anyway.
*/
bp->sp_rtnl_state = 0;
smp_mb();
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
bnx2x_nic_load(bp, LOAD_NORMAL);
goto sp_rtnl_exit;
}
#ifdef BNX2X_STOP_ON_ERROR
sp_rtnl_not_reset:
#endif
if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);

sp_rtnl_exit:
rtnl_unlock();

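The hunk above reorders the deferred slow-path worker so that every pending flag is wiped before a reset and the TC setup runs afterwards. A minimal sketch of the general pattern being rearranged, with illustrative names that are not bnx2x symbols:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

/* Sketch: a trigger path sets a flag bit and schedules the worker; the
 * worker runs under rtnl_lock and consumes flags with
 * test_and_clear_bit(). Flag names and struct are illustrative. */
#define MY_FLAG_TX_TIMEOUT	0
#define MY_FLAG_SETUP_TC	1

struct my_dev {
	unsigned long		sp_state;
	struct work_struct	sp_work;
};

static void my_sp_task(struct work_struct *work)
{
	struct my_dev *md = container_of(work, struct my_dev, sp_work);

	rtnl_lock();
	if (test_and_clear_bit(MY_FLAG_TX_TIMEOUT, &md->sp_state)) {
		/* everything pending is stale once we reset, clear it all */
		md->sp_state = 0;
		smp_mb();
		/* ... unload and reload the function here ... */
	}
	/* reset-independent work runs last, as in the hunk above */
	if (test_and_clear_bit(MY_FLAG_SETUP_TC, &md->sp_state)) {
		/* ... reconfigure traffic classes here ... */
	}
	rtnl_unlock();
}
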
@ -10229,10 +10256,13 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
/**
/*
* Enable internal target-read (in case we are probed after PF FLR).
* Must be done prior to any BAR read access
* Must be done prior to any BAR read access. Only for 57712 and up
*/
if (board_type != BCM57710 &&
board_type != BCM57711 &&
board_type != BCM57711E)
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Reset the load counter */

@ -33,7 +33,6 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/can.h>

@ -27,7 +27,6 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/clk.h>

@ -47,7 +47,6 @@
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif

@ -36,7 +36,6 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

@ -2160,12 +2160,9 @@ static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
u16 vendor_id, device_id;
pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &device_id);
if ((vendor_id != PCI_VENDOR_ID_GIGABYTE) || (device_id != 0xe000))
if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
(pdev->subsystem_device != 0xe000))
return;
rtl_writephy(tp, 0x1f, 0x0001);

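This hunk, like the sbni one further down, drops an explicit config-space read in favour of the subsystem IDs the PCI core already caches in struct pci_dev at enumeration time. A minimal sketch of the resulting pattern; the helper name is illustrative, the vendor/board IDs mirror the quirk above:

#include <linux/pci.h>
#include <linux/pci_ids.h>

/* Sketch: use the cached pdev->subsystem_vendor/subsystem_device fields
 * instead of re-reading PCI_SUBSYSTEM_VENDOR_ID/PCI_SUBSYSTEM_ID. */
static bool is_quirky_board(struct pci_dev *pdev)
{
	return pdev->subsystem_vendor == PCI_VENDOR_ID_GIGABYTE &&
	       pdev->subsystem_device == 0xe000;
}
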
@ -77,7 +77,6 @@
#include <linux/udp.h>
#include <linux/crc-ccitt.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include "via-velocity.h"

@ -303,7 +303,6 @@ sbni_pci_probe( struct net_device *dev )
!= NULL ) {
int pci_irq_line;
unsigned long pci_ioaddr;
u16 subsys;
if( pdev->vendor != SBNI_PCI_VENDOR &&
pdev->device != SBNI_PCI_DEVICE )

@ -314,9 +313,7 @@ sbni_pci_probe( struct net_device *dev )
/* Avoid already found cards from previous calls */
if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
pci_read_config_word( pdev, PCI_SUBSYSTEM_ID, &subsys );
if (subsys != 2)
if (pdev->subsystem_device != 2)
continue;
/* Dual adapter is present */

@ -35,8 +35,8 @@ static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
|
|||
static bool
|
||||
ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
|
||||
{
|
||||
struct ath5k_softc *sc = common->priv;
|
||||
struct platform_device *pdev = to_platform_device(sc->dev);
|
||||
struct ath5k_hw *ah = common->priv;
|
||||
struct platform_device *pdev = to_platform_device(ah->dev);
|
||||
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
|
||||
u16 *eeprom, *eeprom_end;
|
||||
|
||||
|
@ -56,8 +56,7 @@ ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
|
|||
|
||||
int ath5k_hw_read_srev(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
struct platform_device *pdev = to_platform_device(sc->dev);
|
||||
struct platform_device *pdev = to_platform_device(ah->dev);
|
||||
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
|
||||
ah->ah_mac_srev = bcfg->devid;
|
||||
return 0;
|
||||
|
@ -65,12 +64,11 @@ int ath5k_hw_read_srev(struct ath5k_hw *ah)
|
|||
|
||||
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
struct platform_device *pdev = to_platform_device(sc->dev);
|
||||
struct platform_device *pdev = to_platform_device(ah->dev);
|
||||
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
|
||||
u8 *cfg_mac;
|
||||
|
||||
if (to_platform_device(sc->dev)->id == 0)
|
||||
if (to_platform_device(ah->dev)->id == 0)
|
||||
cfg_mac = bcfg->config->wlan0_mac;
|
||||
else
|
||||
cfg_mac = bcfg->config->wlan1_mac;
|
||||
|
@ -90,7 +88,7 @@ static const struct ath_bus_ops ath_ahb_bus_ops = {
|
|||
static int ath_ahb_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
|
||||
struct ath5k_softc *sc;
|
||||
struct ath5k_hw *ah;
|
||||
struct ieee80211_hw *hw;
|
||||
struct resource *res;
|
||||
void __iomem *mem;
|
||||
|
@ -127,19 +125,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
|
|||
|
||||
irq = res->start;
|
||||
|
||||
hw = ieee80211_alloc_hw(sizeof(struct ath5k_softc), &ath5k_hw_ops);
|
||||
hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
|
||||
if (hw == NULL) {
|
||||
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
sc = hw->priv;
|
||||
sc->hw = hw;
|
||||
sc->dev = &pdev->dev;
|
||||
sc->iobase = mem;
|
||||
sc->irq = irq;
|
||||
sc->devid = bcfg->devid;
|
||||
ah = hw->priv;
|
||||
ah->hw = hw;
|
||||
ah->dev = &pdev->dev;
|
||||
ah->iobase = mem;
|
||||
ah->irq = irq;
|
||||
ah->devid = bcfg->devid;
|
||||
|
||||
if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
|
||||
/* Enable WMAC AHB arbitration */
|
||||
|
@ -155,7 +153,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
|
|||
/* Enable WMAC DMA access (assuming 5312 or 231x*/
|
||||
/* TODO: check other platforms */
|
||||
reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
|
||||
if (to_platform_device(sc->dev)->id == 0)
|
||||
if (to_platform_device(ah->dev)->id == 0)
|
||||
reg |= AR5K_AR5312_ENABLE_WLAN0;
|
||||
else
|
||||
reg |= AR5K_AR5312_ENABLE_WLAN1;
|
||||
|
@ -166,13 +164,13 @@ static int ath_ahb_probe(struct platform_device *pdev)
|
|||
* used as pass-through. Disable 2 GHz support in the
|
||||
* driver for it
|
||||
*/
|
||||
if (to_platform_device(sc->dev)->id == 0 &&
|
||||
if (to_platform_device(ah->dev)->id == 0 &&
|
||||
(bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
|
||||
(BD_WLAN1 | BD_WLAN0))
|
||||
__set_bit(ATH_STAT_2G_DISABLED, sc->status);
|
||||
__set_bit(ATH_STAT_2G_DISABLED, ah->status);
|
||||
}
|
||||
|
||||
ret = ath5k_init_softc(sc, &ath_ahb_bus_ops);
|
||||
ret = ath5k_init_softc(ah, &ath_ahb_bus_ops);
|
||||
if (ret != 0) {
|
||||
dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
|
||||
ret = -ENODEV;
|
||||
|
@ -194,13 +192,13 @@ static int ath_ahb_remove(struct platform_device *pdev)
|
|||
{
|
||||
struct ar231x_board_config *bcfg = pdev->dev.platform_data;
|
||||
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
|
||||
struct ath5k_softc *sc;
|
||||
struct ath5k_hw *ah;
|
||||
u32 reg;
|
||||
|
||||
if (!hw)
|
||||
return 0;
|
||||
|
||||
sc = hw->priv;
|
||||
ah = hw->priv;
|
||||
|
||||
if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
|
||||
/* Disable WMAC AHB arbitration */
|
||||
|
@ -210,14 +208,14 @@ static int ath_ahb_remove(struct platform_device *pdev)
|
|||
} else {
|
||||
/*Stop DMA access */
|
||||
reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
|
||||
if (to_platform_device(sc->dev)->id == 0)
|
||||
if (to_platform_device(ah->dev)->id == 0)
|
||||
reg &= ~AR5K_AR5312_ENABLE_WLAN0;
|
||||
else
|
||||
reg &= ~AR5K_AR5312_ENABLE_WLAN1;
|
||||
__raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
|
||||
}
|
||||
|
||||
ath5k_deinit_softc(sc);
|
||||
ath5k_deinit_softc(ah);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
ieee80211_free_hw(hw);
|
||||
|
||||
|
|
|
@ -74,7 +74,7 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
|
|||
static const s8 fr[] = { -78, -80 };
|
||||
#endif
|
||||
if (level < 0 || level >= ARRAY_SIZE(sz)) {
|
||||
ATH5K_ERR(ah->ah_sc, "noise immunity level %d out of range",
|
||||
ATH5K_ERR(ah, "noise immunity level %d out of range",
|
||||
level);
|
||||
return;
|
||||
}
|
||||
|
@ -88,8 +88,8 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
|
|||
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
|
||||
AR5K_PHY_SIG_FIRPWR, fr[level]);
|
||||
|
||||
ah->ah_sc->ani_state.noise_imm_level = level;
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
|
||||
ah->ani_state.noise_imm_level = level;
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
|
||||
}
|
||||
|
||||
|
||||
|
@ -105,8 +105,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
|
|||
static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
|
||||
|
||||
if (level < 0 || level >= ARRAY_SIZE(val) ||
|
||||
level > ah->ah_sc->ani_state.max_spur_level) {
|
||||
ATH5K_ERR(ah->ah_sc, "spur immunity level %d out of range",
|
||||
level > ah->ani_state.max_spur_level) {
|
||||
ATH5K_ERR(ah, "spur immunity level %d out of range",
|
||||
level);
|
||||
return;
|
||||
}
|
||||
|
@ -114,8 +114,8 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
|
|||
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
|
||||
AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
|
||||
|
||||
ah->ah_sc->ani_state.spur_level = level;
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
|
||||
ah->ani_state.spur_level = level;
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
|
||||
}
|
||||
|
||||
|
||||
|
@ -130,15 +130,15 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
|
|||
static const int val[] = { 0, 4, 8 };
|
||||
|
||||
if (level < 0 || level >= ARRAY_SIZE(val)) {
|
||||
ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
|
||||
ATH5K_ERR(ah, "firstep level %d out of range", level);
|
||||
return;
|
||||
}
|
||||
|
||||
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
|
||||
AR5K_PHY_SIG_FIRSTEP, val[level]);
|
||||
|
||||
ah->ah_sc->ani_state.firstep_level = level;
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
|
||||
ah->ani_state.firstep_level = level;
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
|
||||
}
|
||||
|
||||
|
||||
|
@ -178,8 +178,8 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
|
|||
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
|
||||
AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
|
||||
|
||||
ah->ah_sc->ani_state.ofdm_weak_sig = on;
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
|
||||
ah->ani_state.ofdm_weak_sig = on;
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
|
||||
on ? "on" : "off");
|
||||
}
|
||||
|
||||
|
@ -195,8 +195,8 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
|
|||
static const int val[] = { 8, 6 };
|
||||
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
|
||||
AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
|
||||
ah->ah_sc->ani_state.cck_weak_sig = on;
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
|
||||
ah->ani_state.cck_weak_sig = on;
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
|
||||
on ? "on" : "off");
|
||||
}
|
||||
|
||||
|
@ -218,7 +218,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
|
|||
{
|
||||
int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
|
||||
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
|
||||
ofdm_trigger ? "ODFM" : "CCK");
|
||||
|
||||
/* first: raise noise immunity */
|
||||
|
@ -229,13 +229,13 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
|
|||
|
||||
/* only OFDM: raise spur immunity level */
|
||||
if (ofdm_trigger &&
|
||||
as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
|
||||
as->spur_level < ah->ani_state.max_spur_level) {
|
||||
ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
|
||||
return;
|
||||
}
|
||||
|
||||
/* AP mode */
|
||||
if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
|
||||
if (ah->opmode == NL80211_IFTYPE_AP) {
|
||||
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
|
||||
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
|
||||
return;
|
||||
|
@ -248,7 +248,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
|
|||
* don't shut out a remote node by raising immunity too high. */
|
||||
|
||||
if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
|
||||
"beacon RSSI high");
|
||||
/* only OFDM: beacon RSSI is high, we can disable ODFM weak
|
||||
* signal detection */
|
||||
|
@ -265,7 +265,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
|
|||
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
|
||||
/* beacon RSSI in mid range, we need OFDM weak signal detect,
|
||||
* but can raise firstep level */
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
|
||||
"beacon RSSI mid");
|
||||
if (ofdm_trigger && as->ofdm_weak_sig == false)
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
|
||||
|
@ -275,7 +275,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
|
|||
} else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
|
||||
/* beacon RSSI is low. in B/G mode turn of OFDM weak signal
|
||||
* detect and zero firstep level to maximize CCK sensitivity */
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
|
||||
"beacon RSSI low, 2GHz");
|
||||
if (ofdm_trigger && as->ofdm_weak_sig == true)
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
|
||||
|
@ -303,9 +303,9 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
|
|||
{
|
||||
int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
|
||||
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
|
||||
|
||||
if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
|
||||
if (ah->opmode == NL80211_IFTYPE_AP) {
|
||||
/* AP mode */
|
||||
if (as->firstep_level > 0) {
|
||||
ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
|
||||
|
@ -464,7 +464,7 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
|
|||
void
|
||||
ath5k_ani_calibration(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
|
||||
struct ath5k_ani_state *as = &ah->ani_state;
|
||||
int listen, ofdm_high, ofdm_low, cck_high, cck_low;
|
||||
|
||||
/* get listen time since last call and add it to the counter because we
|
||||
|
@ -483,9 +483,9 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
|
|||
ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
|
||||
cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
|
||||
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
|
||||
"listen %d (now %d)", as->listen_time, listen);
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
|
||||
"check high ofdm %d/%d cck %d/%d",
|
||||
as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
|
||||
|
||||
|
@ -498,7 +498,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
|
|||
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
|
||||
/* If more than 5 (TODO: why 5?) periods have passed and we got
|
||||
* relatively little errors we can try to lower immunity */
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
|
||||
"check low ofdm %d/%d cck %d/%d",
|
||||
as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
|
||||
|
||||
|
@ -525,7 +525,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
|
|||
void
|
||||
ath5k_ani_mib_intr(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
|
||||
struct ath5k_ani_state *as = &ah->ani_state;
|
||||
|
||||
/* nothing to do here if HW does not have PHY error counters - they
|
||||
* can't be the reason for the MIB interrupt then */
|
||||
|
@ -536,7 +536,7 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
|
|||
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
|
||||
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
|
||||
|
||||
if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
|
||||
if (ah->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
|
||||
return;
|
||||
|
||||
/* If one of the errors triggered, we can get a superfluous second
|
||||
|
@ -547,7 +547,7 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
|
|||
|
||||
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
|
||||
as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
|
||||
tasklet_schedule(&ah->ah_sc->ani_tasklet);
|
||||
tasklet_schedule(&ah->ani_tasklet);
|
||||
}
|
||||
|
||||
|
||||
|
@ -561,16 +561,16 @@ void
|
|||
ath5k_ani_phy_error_report(struct ath5k_hw *ah,
|
||||
enum ath5k_phy_error_code phyerr)
|
||||
{
|
||||
struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
|
||||
struct ath5k_ani_state *as = &ah->ani_state;
|
||||
|
||||
if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
|
||||
as->ofdm_errors++;
|
||||
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
|
||||
tasklet_schedule(&ah->ah_sc->ani_tasklet);
|
||||
tasklet_schedule(&ah->ani_tasklet);
|
||||
} else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
|
||||
as->cck_errors++;
|
||||
if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
|
||||
tasklet_schedule(&ah->ah_sc->ani_tasklet);
|
||||
tasklet_schedule(&ah->ani_tasklet);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -631,24 +631,24 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
|
|||
return;
|
||||
|
||||
if (mode < ATH5K_ANI_MODE_OFF || mode > ATH5K_ANI_MODE_AUTO) {
|
||||
ATH5K_ERR(ah->ah_sc, "ANI mode %d out of range", mode);
|
||||
ATH5K_ERR(ah, "ANI mode %d out of range", mode);
|
||||
return;
|
||||
}
|
||||
|
||||
/* clear old state information */
|
||||
memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
|
||||
memset(&ah->ani_state, 0, sizeof(ah->ani_state));
|
||||
|
||||
/* older hardware has more spur levels than newer */
|
||||
if (ah->ah_mac_srev < AR5K_SREV_AR2414)
|
||||
ah->ah_sc->ani_state.max_spur_level = 7;
|
||||
ah->ani_state.max_spur_level = 7;
|
||||
else
|
||||
ah->ah_sc->ani_state.max_spur_level = 2;
|
||||
ah->ani_state.max_spur_level = 2;
|
||||
|
||||
/* initial values for our ani parameters */
|
||||
if (mode == ATH5K_ANI_MODE_OFF) {
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI off\n");
|
||||
} else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
|
||||
"ANI manual low -> high sensitivity\n");
|
||||
ath5k_ani_set_noise_immunity_level(ah, 0);
|
||||
ath5k_ani_set_spur_immunity_level(ah, 0);
|
||||
|
@ -656,17 +656,17 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
|
|||
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
|
||||
ath5k_ani_set_cck_weak_signal_detection(ah, true);
|
||||
} else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
|
||||
"ANI manual high -> low sensitivity\n");
|
||||
ath5k_ani_set_noise_immunity_level(ah,
|
||||
ATH5K_ANI_MAX_NOISE_IMM_LVL);
|
||||
ath5k_ani_set_spur_immunity_level(ah,
|
||||
ah->ah_sc->ani_state.max_spur_level);
|
||||
ah->ani_state.max_spur_level);
|
||||
ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
|
||||
ath5k_ani_set_cck_weak_signal_detection(ah, false);
|
||||
} else if (mode == ATH5K_ANI_MODE_AUTO) {
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI auto\n");
|
||||
ath5k_ani_set_noise_immunity_level(ah, 0);
|
||||
ath5k_ani_set_spur_immunity_level(ah, 0);
|
||||
ath5k_ani_set_firstep_level(ah, 0);
|
||||
|
@ -692,7 +692,7 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
|
|||
~AR5K_RX_FILTER_PHYERR);
|
||||
}
|
||||
|
||||
ah->ah_sc->ani_state.ani_mode = mode;
|
||||
ah->ani_state.ani_mode = mode;
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -24,8 +24,10 @@
|
|||
#define CHAN_DEBUG 0
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/average.h>
|
||||
#include <linux/leds.h>
|
||||
#include <net/mac80211.h>
|
||||
|
||||
/* RX/TX descriptor hw structs
|
||||
|
@ -36,7 +38,9 @@
|
|||
* TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
|
||||
* and clean up common bits, then introduce set/get functions in eeprom.c */
|
||||
#include "eeprom.h"
|
||||
#include "debug.h"
|
||||
#include "../ath.h"
|
||||
#include "ani.h"
|
||||
|
||||
/* PCI IDs */
|
||||
#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
|
||||
|
@ -537,6 +541,27 @@ enum ath5k_tx_queue_id {
|
|||
#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
|
||||
#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
|
||||
|
||||
/*
|
||||
* Data transmit queue state. One of these exists for each
|
||||
* hardware transmit queue. Packets sent to us from above
|
||||
* are assigned to queues based on their priority. Not all
|
||||
* devices support a complete set of hardware transmit queues.
|
||||
* For those devices the array sc_ac2q will map multiple
|
||||
* priorities to fewer hardware queues (typically all to one
|
||||
* hardware queue).
|
||||
*/
|
||||
struct ath5k_txq {
|
||||
unsigned int qnum; /* hardware q number */
|
||||
u32 *link; /* link ptr in last TX desc */
|
||||
struct list_head q; /* transmit queue */
|
||||
spinlock_t lock; /* lock on q and link */
|
||||
bool setup;
|
||||
int txq_len; /* number of queued buffers */
|
||||
int txq_max; /* max allowed num of queued buffers */
|
||||
bool txq_poll_mark;
|
||||
unsigned int txq_stuck; /* informational counter */
|
||||
};
|
||||
|
||||
/*
|
||||
* A struct to hold tx queue's parameters
|
||||
*/
|
||||
|
@ -947,35 +972,6 @@ enum ath5k_power_mode {
|
|||
#define AR5K_SOFTLED_ON 0
|
||||
#define AR5K_SOFTLED_OFF 1
|
||||
|
||||
/*
|
||||
* Chipset capabilities -see ath5k_hw_get_capability-
|
||||
* get_capability function is not yet fully implemented
|
||||
* in ath5k so most of these don't work yet...
|
||||
* TODO: Implement these & merge with _TUNE_ stuff above
|
||||
*/
|
||||
enum ath5k_capability_type {
|
||||
AR5K_CAP_REG_DMN = 0, /* Used to get current reg. domain id */
|
||||
AR5K_CAP_TKIP_MIC = 2, /* Can handle TKIP MIC in hardware */
|
||||
AR5K_CAP_TKIP_SPLIT = 3, /* TKIP uses split keys */
|
||||
AR5K_CAP_PHYCOUNTERS = 4, /* PHY error counters */
|
||||
AR5K_CAP_DIVERSITY = 5, /* Supports fast diversity */
|
||||
AR5K_CAP_NUM_TXQUEUES = 6, /* Used to get max number of hw txqueues */
|
||||
AR5K_CAP_VEOL = 7, /* Supports virtual EOL */
|
||||
AR5K_CAP_COMPRESSION = 8, /* Supports compression */
|
||||
AR5K_CAP_BURST = 9, /* Supports packet bursting */
|
||||
AR5K_CAP_FASTFRAME = 10, /* Supports fast frames */
|
||||
AR5K_CAP_TXPOW = 11, /* Used to get global tx power limit */
|
||||
AR5K_CAP_TPC = 12, /* Can do per-packet tx power control (needed for 802.11a) */
|
||||
AR5K_CAP_BSSIDMASK = 13, /* Supports bssid mask */
|
||||
AR5K_CAP_MCAST_KEYSRCH = 14, /* Supports multicast key search */
|
||||
AR5K_CAP_TSF_ADJUST = 15, /* Supports beacon tsf adjust */
|
||||
AR5K_CAP_XR = 16, /* Supports XR mode */
|
||||
AR5K_CAP_WME_TKIPMIC = 17, /* Supports TKIP MIC when using WMM */
|
||||
AR5K_CAP_CHAN_HALFRATE = 18, /* Supports half rate channels */
|
||||
AR5K_CAP_CHAN_QUARTERRATE = 19, /* Supports quarter rate channels */
|
||||
AR5K_CAP_RFSILENT = 20, /* Supports RFsilent */
|
||||
};
|
||||
|
||||
|
||||
/* XXX: we *may* move cap_range stuff to struct wiphy */
|
||||
struct ath5k_capabilities {
|
||||
|
@ -1027,9 +1023,66 @@ struct ath5k_avg_val {
|
|||
int avg_weight;
|
||||
};
|
||||
|
||||
/***************************************\
|
||||
HARDWARE ABSTRACTION LAYER STRUCTURE
|
||||
\***************************************/
|
||||
#define ATH5K_LED_MAX_NAME_LEN 31
|
||||
|
||||
/*
|
||||
* State for LED triggers
|
||||
*/
|
||||
struct ath5k_led {
|
||||
char name[ATH5K_LED_MAX_NAME_LEN + 1]; /* name of the LED in sysfs */
|
||||
struct ath5k_hw *ah; /* driver state */
|
||||
struct led_classdev led_dev; /* led classdev */
|
||||
};
|
||||
|
||||
/* Rfkill */
|
||||
struct ath5k_rfkill {
|
||||
/* GPIO PIN for rfkill */
|
||||
u16 gpio;
|
||||
/* polarity of rfkill GPIO PIN */
|
||||
bool polarity;
|
||||
/* RFKILL toggle tasklet */
|
||||
struct tasklet_struct toggleq;
|
||||
};
|
||||
|
||||
/* statistics */
|
||||
struct ath5k_statistics {
|
||||
/* antenna use */
|
||||
unsigned int antenna_rx[5]; /* frames count per antenna RX */
|
||||
unsigned int antenna_tx[5]; /* frames count per antenna TX */
|
||||
|
||||
/* frame errors */
|
||||
unsigned int rx_all_count; /* all RX frames, including errors */
|
||||
unsigned int tx_all_count; /* all TX frames, including errors */
|
||||
unsigned int rx_bytes_count; /* all RX bytes, including errored pkts
|
||||
* and the MAC headers for each packet
|
||||
*/
|
||||
unsigned int tx_bytes_count; /* all TX bytes, including errored pkts
|
||||
* and the MAC headers and padding for
|
||||
* each packet.
|
||||
*/
|
||||
unsigned int rxerr_crc;
|
||||
unsigned int rxerr_phy;
|
||||
unsigned int rxerr_phy_code[32];
|
||||
unsigned int rxerr_fifo;
|
||||
unsigned int rxerr_decrypt;
|
||||
unsigned int rxerr_mic;
|
||||
unsigned int rxerr_proc;
|
||||
unsigned int rxerr_jumbo;
|
||||
unsigned int txerr_retry;
|
||||
unsigned int txerr_fifo;
|
||||
unsigned int txerr_filt;
|
||||
|
||||
/* MIB counters */
|
||||
unsigned int ack_fail;
|
||||
unsigned int rts_fail;
|
||||
unsigned int rts_ok;
|
||||
unsigned int fcs_error;
|
||||
unsigned int beacons;
|
||||
|
||||
unsigned int mib_intr;
|
||||
unsigned int rxorn_intr;
|
||||
unsigned int rxeol_intr;
|
||||
};
|
||||
|
||||
/*
|
||||
* Misc defines
|
||||
|
@ -1038,12 +1091,114 @@ struct ath5k_avg_val {
|
|||
#define AR5K_MAX_GPIO 10
|
||||
#define AR5K_MAX_RF_BANKS 8
|
||||
|
||||
/* TODO: Clean up and merge with ath5k_softc */
|
||||
#if CHAN_DEBUG
|
||||
#define ATH_CHAN_MAX (26 + 26 + 26 + 200 + 200)
|
||||
#else
|
||||
#define ATH_CHAN_MAX (14 + 14 + 14 + 252 + 20)
|
||||
#endif
|
||||
|
||||
#define ATH_RXBUF 40 /* number of RX buffers */
|
||||
#define ATH_TXBUF 200 /* number of TX buffers */
|
||||
#define ATH_BCBUF 4 /* number of beacon buffers */
|
||||
#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
|
||||
#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
|
||||
|
||||
/* Driver state associated with an instance of a device */
|
||||
struct ath5k_hw {
|
||||
struct ath_common common;
|
||||
|
||||
struct ath5k_softc *ah_sc;
|
||||
void __iomem *ah_iobase;
|
||||
struct pci_dev *pdev;
|
||||
struct device *dev; /* for dma mapping */
|
||||
int irq;
|
||||
u16 devid;
|
||||
void __iomem *iobase; /* address of the device */
|
||||
struct mutex lock; /* dev-level lock */
|
||||
struct ieee80211_hw *hw; /* IEEE 802.11 common */
|
||||
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
|
||||
struct ieee80211_channel channels[ATH_CHAN_MAX];
|
||||
struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
|
||||
s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
|
||||
enum nl80211_iftype opmode;
|
||||
|
||||
#ifdef CONFIG_ATH5K_DEBUG
|
||||
struct ath5k_dbg_info debug; /* debug info */
|
||||
#endif /* CONFIG_ATH5K_DEBUG */
|
||||
|
||||
struct ath5k_buf *bufptr; /* allocated buffer ptr */
|
||||
struct ath5k_desc *desc; /* TX/RX descriptors */
|
||||
dma_addr_t desc_daddr; /* DMA (physical) address */
|
||||
size_t desc_len; /* size of TX/RX descriptors */
|
||||
|
||||
DECLARE_BITMAP(status, 6);
|
||||
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
|
||||
#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
|
||||
#define ATH_STAT_PROMISC 2
|
||||
#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
|
||||
#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
|
||||
#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
|
||||
|
||||
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
|
||||
struct ieee80211_channel *curchan; /* current h/w channel */
|
||||
|
||||
u16 nvifs;
|
||||
|
||||
enum ath5k_int imask; /* interrupt mask copy */
|
||||
|
||||
spinlock_t irqlock;
|
||||
bool rx_pending; /* rx tasklet pending */
|
||||
bool tx_pending; /* tx tasklet pending */
|
||||
|
||||
u8 lladdr[ETH_ALEN];
|
||||
u8 bssidmask[ETH_ALEN];
|
||||
|
||||
unsigned int led_pin, /* GPIO pin for driving LED */
|
||||
led_on; /* pin setting for LED on */
|
||||
|
||||
struct work_struct reset_work; /* deferred chip reset */
|
||||
|
||||
unsigned int rxbufsize; /* rx size based on mtu */
|
||||
struct list_head rxbuf; /* receive buffer */
|
||||
spinlock_t rxbuflock;
|
||||
u32 *rxlink; /* link ptr in last RX desc */
|
||||
struct tasklet_struct rxtq; /* rx intr tasklet */
|
||||
struct ath5k_led rx_led; /* rx led */
|
||||
|
||||
struct list_head txbuf; /* transmit buffer */
|
||||
spinlock_t txbuflock;
|
||||
unsigned int txbuf_len; /* buf count in txbuf list */
|
||||
struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */
|
||||
struct tasklet_struct txtq; /* tx intr tasklet */
|
||||
struct ath5k_led tx_led; /* tx led */
|
||||
|
||||
struct ath5k_rfkill rf_kill;
|
||||
|
||||
struct tasklet_struct calib; /* calibration tasklet */
|
||||
|
||||
spinlock_t block; /* protects beacon */
|
||||
struct tasklet_struct beacontq; /* beacon intr tasklet */
|
||||
struct list_head bcbuf; /* beacon buffer */
|
||||
struct ieee80211_vif *bslot[ATH_BCBUF];
|
||||
u16 num_ap_vifs;
|
||||
u16 num_adhoc_vifs;
|
||||
unsigned int bhalq, /* SW q for outgoing beacons */
|
||||
bmisscount, /* missed beacon transmits */
|
||||
bintval, /* beacon interval in TU */
|
||||
bsent;
|
||||
unsigned int nexttbtt; /* next beacon time in TU */
|
||||
struct ath5k_txq *cabq; /* content after beacon */
|
||||
|
||||
int power_level; /* Requested tx power in dBm */
|
||||
bool assoc; /* associate state */
|
||||
bool enable_beacon; /* true if beacons are on */
|
||||
|
||||
struct ath5k_statistics stats;
|
||||
|
||||
struct ath5k_ani_state ani_state;
|
||||
struct tasklet_struct ani_tasklet; /* ANI calibration */
|
||||
|
||||
struct delayed_work tx_complete_work;
|
||||
|
||||
struct survey_info survey; /* collected survey info */
|
||||
|
||||
enum ath5k_int ah_imr;
|
||||
|
||||
|
@ -1172,43 +1327,43 @@ struct ath_bus_ops {
|
|||
extern const struct ieee80211_ops ath5k_hw_ops;
|
||||
|
||||
/* Initialization and detach functions */
|
||||
int ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops);
|
||||
void ath5k_deinit_softc(struct ath5k_softc *sc);
|
||||
int ath5k_hw_init(struct ath5k_softc *sc);
|
||||
int ath5k_init_softc(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops);
|
||||
void ath5k_deinit_softc(struct ath5k_hw *ah);
|
||||
int ath5k_hw_init(struct ath5k_hw *ah);
|
||||
void ath5k_hw_deinit(struct ath5k_hw *ah);
|
||||
|
||||
int ath5k_sysfs_register(struct ath5k_softc *sc);
|
||||
void ath5k_sysfs_unregister(struct ath5k_softc *sc);
|
||||
int ath5k_sysfs_register(struct ath5k_hw *ah);
|
||||
void ath5k_sysfs_unregister(struct ath5k_hw *ah);
|
||||
|
||||
/* base.c */
|
||||
struct ath5k_buf;
|
||||
struct ath5k_txq;
|
||||
|
||||
void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
|
||||
bool ath5k_any_vif_assoc(struct ath5k_softc *sc);
|
||||
bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
|
||||
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
|
||||
struct ath5k_txq *txq);
|
||||
int ath5k_init_hw(struct ath5k_softc *sc);
|
||||
int ath5k_stop_hw(struct ath5k_softc *sc);
|
||||
void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
|
||||
void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
|
||||
int ath5k_start(struct ieee80211_hw *hw);
|
||||
void ath5k_stop(struct ieee80211_hw *hw);
|
||||
void ath5k_mode_setup(struct ath5k_hw *ah, struct ieee80211_vif *vif);
|
||||
void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
|
||||
struct ieee80211_vif *vif);
|
||||
int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
|
||||
void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
|
||||
int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
|
||||
void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
|
||||
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
|
||||
void ath5k_beacon_config(struct ath5k_softc *sc);
|
||||
void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
|
||||
void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
|
||||
void ath5k_beacon_config(struct ath5k_hw *ah);
|
||||
void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
|
||||
void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
|
||||
|
||||
/*Chip id helper functions */
|
||||
const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
|
||||
int ath5k_hw_read_srev(struct ath5k_hw *ah);
|
||||
|
||||
/* LED functions */
|
||||
int ath5k_init_leds(struct ath5k_softc *sc);
|
||||
void ath5k_led_enable(struct ath5k_softc *sc);
|
||||
void ath5k_led_off(struct ath5k_softc *sc);
|
||||
void ath5k_unregister_leds(struct ath5k_softc *sc);
|
||||
int ath5k_init_leds(struct ath5k_hw *ah);
|
||||
void ath5k_led_enable(struct ath5k_hw *ah);
|
||||
void ath5k_led_off(struct ath5k_hw *ah);
|
||||
void ath5k_unregister_leds(struct ath5k_hw *ah);
|
||||
|
||||
|
||||
/* Reset Functions */
|
||||
|
@ -1322,9 +1477,6 @@ void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
|
|||
|
||||
/* Misc functions TODO: Cleanup */
|
||||
int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
|
||||
int ath5k_hw_get_capability(struct ath5k_hw *ah,
|
||||
enum ath5k_capability_type cap_type, u32 capability,
|
||||
u32 *result);
|
||||
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
|
||||
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
|
||||
|
||||
|
@ -1384,7 +1536,7 @@ static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
|
|||
(ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
|
||||
return AR5K_AR2315_PCI_BASE + reg;
|
||||
|
||||
return ah->ah_iobase + reg;
|
||||
return ah->iobase + reg;
|
||||
}
|
||||
|
||||
static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
|
||||
|
@ -1401,12 +1553,12 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
|
|||
|
||||
static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
|
||||
{
|
||||
return ioread32(ah->ah_iobase + reg);
|
||||
return ioread32(ah->iobase + reg);
|
||||
}
|
||||
|
||||
static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
|
||||
{
|
||||
iowrite32(val, ah->ah_iobase + reg);
|
||||
iowrite32(val, ah->iobase + reg);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -59,7 +59,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
|
|||
cur_val = ath5k_hw_reg_read(ah, cur_reg);
|
||||
|
||||
if (cur_val != var_pattern) {
|
||||
ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
|
||||
ATH5K_ERR(ah, "POST Failed !!!\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
|
@ -74,7 +74,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
|
|||
cur_val = ath5k_hw_reg_read(ah, cur_reg);
|
||||
|
||||
if (cur_val != var_pattern) {
|
||||
ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
|
||||
ATH5K_ERR(ah, "POST Failed !!!\n");
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
|
@ -95,19 +95,18 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
|
|||
/**
|
||||
* ath5k_hw_init - Check if hw is supported and init the needed structs
|
||||
*
|
||||
* @sc: The &struct ath5k_softc we got from the driver's init_softc function
|
||||
* @ah: The &struct ath5k_hw we got from the driver's init_softc function
|
||||
*
|
||||
* Check if the device is supported, perform a POST and initialize the needed
|
||||
* structs. Returns -ENOMEM if we don't have memory for the needed structs,
|
||||
* -ENODEV if the device is not supported or prints an error msg if something
|
||||
* else went wrong.
|
||||
*/
|
||||
int ath5k_hw_init(struct ath5k_softc *sc)
|
||||
int ath5k_hw_init(struct ath5k_hw *ah)
|
||||
{
|
||||
static const u8 zero_mac[ETH_ALEN] = { };
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
struct pci_dev *pdev = sc->pdev;
|
||||
struct pci_dev *pdev = ah->pdev;
|
||||
struct ath5k_eeprom_info *ee;
|
||||
int ret;
|
||||
u32 srev;
|
||||
|
@ -123,8 +122,8 @@ int ath5k_hw_init(struct ath5k_softc *sc)
|
|||
ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
|
||||
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
|
||||
ah->ah_noise_floor = -95; /* until first NF calibration is run */
|
||||
sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
|
||||
ah->ah_current_channel = &sc->channels[0];
|
||||
ah->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
|
||||
ah->ah_current_channel = &ah->channels[0];
|
||||
|
||||
/*
|
||||
* Find the mac version
|
||||
|
@ -237,7 +236,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
|
|||
ah->ah_single_chip = true;
|
||||
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
|
||||
} else {
|
||||
ATH5K_ERR(sc, "Couldn't identify radio revision.\n");
|
||||
ATH5K_ERR(ah, "Couldn't identify radio revision.\n");
|
||||
ret = -ENODEV;
|
||||
goto err;
|
||||
}
|
||||
|
@ -246,7 +245,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
|
|||
|
||||
/* Return on unsupported chips (unsupported eeprom etc) */
|
||||
if ((srev >= AR5K_SREV_AR5416) && (srev < AR5K_SREV_AR2425)) {
|
||||
ATH5K_ERR(sc, "Device not yet supported.\n");
|
||||
ATH5K_ERR(ah, "Device not yet supported.\n");
|
||||
ret = -ENODEV;
|
||||
goto err;
|
||||
}
|
||||
|
@ -268,7 +267,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
|
|||
*/
|
||||
ret = ath5k_eeprom_init(ah);
|
||||
if (ret) {
|
||||
ATH5K_ERR(sc, "unable to init EEPROM\n");
|
||||
ATH5K_ERR(ah, "unable to init EEPROM\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@ -309,17 +308,17 @@ int ath5k_hw_init(struct ath5k_softc *sc)
|
|||
/* Get misc capabilities */
|
||||
ret = ath5k_hw_set_capabilities(ah);
|
||||
if (ret) {
|
||||
ATH5K_ERR(sc, "unable to get device capabilities\n");
|
||||
ATH5K_ERR(ah, "unable to get device capabilities\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (test_bit(ATH_STAT_2G_DISABLED, sc->status)) {
|
||||
if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
|
||||
__clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
|
||||
__clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
|
||||
}
|
||||
|
||||
/* Crypto settings */
|
||||
common->keymax = (sc->ah->ah_version == AR5K_AR5210 ?
|
||||
common->keymax = (ah->ah_version == AR5K_AR5210 ?
|
||||
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
|
||||
|
||||
if (srev >= AR5K_SREV_AR5212_V4 &&
|
||||
|
@ -339,7 +338,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
|
|||
/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
|
||||
memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
|
||||
ath5k_hw_set_bssid(ah);
|
||||
ath5k_hw_set_opmode(ah, sc->opmode);
|
||||
ath5k_hw_set_opmode(ah, ah->opmode);
|
||||
|
||||
ath5k_hw_rfgain_opt_init(ah);
|
||||
|
||||
|
@ -360,7 +359,7 @@ int ath5k_hw_init(struct ath5k_softc *sc)
|
|||
*/
|
||||
void ath5k_hw_deinit(struct ath5k_hw *ah)
|
||||
{
|
||||
__set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
|
||||
__set_bit(ATH_STAT_INVALID, ah->status);
|
||||
|
||||
if (ah->ah_rf_banks != NULL)
|
||||
kfree(ah->ah_rf_banks);
|
||||
|
|
File diff suppressed because it is too large
|
@ -45,23 +45,13 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/wireless.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/rfkill.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include "ath5k.h"
|
||||
#include "debug.h"
|
||||
#include "ani.h"
|
||||
|
||||
#include "../regd.h"
|
||||
#include "../ath.h"
|
||||
|
||||
#define ATH_RXBUF 40 /* number of RX buffers */
|
||||
#define ATH_TXBUF 200 /* number of TX buffers */
|
||||
#define ATH_BCBUF 4 /* number of beacon buffers */
|
||||
#define ATH5K_TXQ_LEN_MAX (ATH_TXBUF / 4) /* bufs per queue */
|
||||
#define ATH5K_TXQ_LEN_LOW (ATH5K_TXQ_LEN_MAX / 2) /* low mark */
|
||||
|
||||
struct ath5k_buf {
|
||||
struct list_head list;
|
||||
struct ath5k_desc *desc; /* virtual addr of desc */
|
||||
|
@ -70,94 +60,6 @@ struct ath5k_buf {
|
|||
dma_addr_t skbaddr;/* physical addr of skb data */
|
||||
};
|
||||
|
||||
/*
|
||||
* Data transmit queue state. One of these exists for each
|
||||
* hardware transmit queue. Packets sent to us from above
|
||||
* are assigned to queues based on their priority. Not all
|
||||
* devices support a complete set of hardware transmit queues.
|
||||
* For those devices the array sc_ac2q will map multiple
|
||||
* priorities to fewer hardware queues (typically all to one
|
||||
* hardware queue).
|
||||
*/
|
||||
struct ath5k_txq {
|
||||
unsigned int qnum; /* hardware q number */
|
||||
u32 *link; /* link ptr in last TX desc */
|
||||
struct list_head q; /* transmit queue */
|
||||
spinlock_t lock; /* lock on q and link */
|
||||
bool setup;
|
||||
int txq_len; /* number of queued buffers */
|
||||
int txq_max; /* max allowed num of queued buffers */
|
||||
bool txq_poll_mark;
|
||||
unsigned int txq_stuck; /* informational counter */
|
||||
};
|
||||
|
||||
#define ATH5K_LED_MAX_NAME_LEN 31
|
||||
|
||||
/*
|
||||
* State for LED triggers
|
||||
*/
|
||||
struct ath5k_led {
|
||||
char name[ATH5K_LED_MAX_NAME_LEN + 1]; /* name of the LED in sysfs */
|
||||
struct ath5k_softc *sc; /* driver state */
|
||||
struct led_classdev led_dev; /* led classdev */
|
||||
};
|
||||
|
||||
/* Rfkill */
|
||||
struct ath5k_rfkill {
|
||||
/* GPIO PIN for rfkill */
|
||||
u16 gpio;
|
||||
/* polarity of rfkill GPIO PIN */
|
||||
bool polarity;
|
||||
/* RFKILL toggle tasklet */
|
||||
struct tasklet_struct toggleq;
|
||||
};
|
||||
|
||||
/* statistics */
|
||||
struct ath5k_statistics {
|
||||
/* antenna use */
|
||||
unsigned int antenna_rx[5]; /* frames count per antenna RX */
|
||||
unsigned int antenna_tx[5]; /* frames count per antenna TX */
|
||||
|
||||
/* frame errors */
|
||||
unsigned int rx_all_count; /* all RX frames, including errors */
|
||||
unsigned int tx_all_count; /* all TX frames, including errors */
|
||||
unsigned int rx_bytes_count; /* all RX bytes, including errored pkts
|
||||
* and the MAC headers for each packet
|
||||
*/
|
||||
unsigned int tx_bytes_count; /* all TX bytes, including errored pkts
|
||||
* and the MAC headers and padding for
|
||||
* each packet.
|
||||
*/
|
||||
unsigned int rxerr_crc;
|
||||
unsigned int rxerr_phy;
|
||||
unsigned int rxerr_phy_code[32];
|
||||
unsigned int rxerr_fifo;
|
||||
unsigned int rxerr_decrypt;
|
||||
unsigned int rxerr_mic;
|
||||
unsigned int rxerr_proc;
|
||||
unsigned int rxerr_jumbo;
|
||||
unsigned int txerr_retry;
|
||||
unsigned int txerr_fifo;
|
||||
unsigned int txerr_filt;
|
||||
|
||||
/* MIB counters */
|
||||
unsigned int ack_fail;
|
||||
unsigned int rts_fail;
|
||||
unsigned int rts_ok;
|
||||
unsigned int fcs_error;
|
||||
unsigned int beacons;
|
||||
|
||||
unsigned int mib_intr;
|
||||
unsigned int rxorn_intr;
|
||||
unsigned int rxeol_intr;
|
||||
};
|
||||
|
||||
#if CHAN_DEBUG
|
||||
#define ATH_CHAN_MAX (26 + 26 + 26 + 200 + 200)
|
||||
#else
|
||||
#define ATH_CHAN_MAX (14 + 14 + 14 + 252 + 20)
|
||||
#endif
|
||||
|
||||
struct ath5k_vif {
|
||||
bool assoc; /* are we associated or not */
|
||||
enum nl80211_iftype opmode;
|
||||
|
@ -166,104 +68,6 @@ struct ath5k_vif {
|
|||
u8 lladdr[ETH_ALEN];
|
||||
};
|
||||
|
||||
/* Software Carrier, keeps track of the driver state
|
||||
* associated with an instance of a device */
|
||||
struct ath5k_softc {
|
||||
struct pci_dev *pdev;
|
||||
struct device *dev; /* for dma mapping */
|
||||
int irq;
|
||||
u16 devid;
|
||||
void __iomem *iobase; /* address of the device */
|
||||
struct mutex lock; /* dev-level lock */
|
||||
struct ieee80211_hw *hw; /* IEEE 802.11 common */
|
||||
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
|
||||
struct ieee80211_channel channels[ATH_CHAN_MAX];
|
||||
struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
|
||||
s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
|
||||
enum nl80211_iftype opmode;
|
||||
struct ath5k_hw *ah; /* Atheros HW */
|
||||
|
||||
#ifdef CONFIG_ATH5K_DEBUG
|
||||
struct ath5k_dbg_info debug; /* debug info */
|
||||
#endif /* CONFIG_ATH5K_DEBUG */
|
||||
|
||||
struct ath5k_buf *bufptr; /* allocated buffer ptr */
|
||||
struct ath5k_desc *desc; /* TX/RX descriptors */
|
||||
dma_addr_t desc_daddr; /* DMA (physical) address */
|
||||
size_t desc_len; /* size of TX/RX descriptors */
|
||||
|
||||
DECLARE_BITMAP(status, 6);
|
||||
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
|
||||
#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
|
||||
#define ATH_STAT_PROMISC 2
|
||||
#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
|
||||
#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
|
||||
#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
|
||||
|
||||
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
|
||||
struct ieee80211_channel *curchan; /* current h/w channel */
|
||||
|
||||
u16 nvifs;
|
||||
|
||||
enum ath5k_int imask; /* interrupt mask copy */
|
||||
|
||||
spinlock_t irqlock;
|
||||
bool rx_pending; /* rx tasklet pending */
|
||||
bool tx_pending; /* tx tasklet pending */
|
||||
|
||||
u8 lladdr[ETH_ALEN];
|
||||
u8 bssidmask[ETH_ALEN];
|
||||
|
||||
unsigned int led_pin, /* GPIO pin for driving LED */
|
||||
led_on; /* pin setting for LED on */
|
||||
|
||||
struct work_struct reset_work; /* deferred chip reset */
|
||||
|
||||
unsigned int rxbufsize; /* rx size based on mtu */
|
||||
struct list_head rxbuf; /* receive buffer */
|
||||
spinlock_t rxbuflock;
|
||||
u32 *rxlink; /* link ptr in last RX desc */
|
||||
struct tasklet_struct rxtq; /* rx intr tasklet */
|
||||
struct ath5k_led rx_led; /* rx led */
|
||||
|
||||
struct list_head txbuf; /* transmit buffer */
|
||||
spinlock_t txbuflock;
|
||||
unsigned int txbuf_len; /* buf count in txbuf list */
|
||||
struct ath5k_txq txqs[AR5K_NUM_TX_QUEUES]; /* tx queues */
|
||||
struct tasklet_struct txtq; /* tx intr tasklet */
|
||||
struct ath5k_led tx_led; /* tx led */
|
||||
|
||||
struct ath5k_rfkill rf_kill;
|
||||
|
||||
struct tasklet_struct calib; /* calibration tasklet */
|
||||
|
||||
spinlock_t block; /* protects beacon */
|
||||
struct tasklet_struct beacontq; /* beacon intr tasklet */
|
||||
struct list_head bcbuf; /* beacon buffer */
|
||||
struct ieee80211_vif *bslot[ATH_BCBUF];
|
||||
u16 num_ap_vifs;
|
||||
u16 num_adhoc_vifs;
|
||||
unsigned int bhalq, /* SW q for outgoing beacons */
|
||||
bmisscount, /* missed beacon transmits */
|
||||
bintval, /* beacon interval in TU */
|
||||
bsent;
|
||||
unsigned int nexttbtt; /* next beacon time in TU */
|
||||
struct ath5k_txq *cabq; /* content after beacon */
|
||||
|
||||
int power_level; /* Requested tx power in dBm */
|
||||
bool assoc; /* associate state */
|
||||
bool enable_beacon; /* true if beacons are on */
|
||||
|
||||
struct ath5k_statistics stats;
|
||||
|
||||
struct ath5k_ani_state ani_state;
|
||||
struct tasklet_struct ani_tasklet; /* ANI calibration */
|
||||
|
||||
struct delayed_work tx_complete_work;
|
||||
|
||||
struct survey_info survey; /* collected survey info */
|
||||
};
|
||||
|
||||
struct ath5k_vif_iter_data {
|
||||
const u8 *hw_macaddr;
|
||||
u8 mask[ETH_ALEN];
|
||||
|
@ -277,9 +81,10 @@ struct ath5k_vif_iter_data {
|
|||
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
|
||||
|
||||
|
||||
#define ath5k_hw_hasbssidmask(_ah) \
|
||||
(ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0)
|
||||
#define ath5k_hw_hasveol(_ah) \
|
||||
(ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0)
|
||||
/* Check whether BSSID mask is supported */
|
||||
#define ath5k_hw_hasbssidmask(_ah) (ah->ah_version == AR5K_AR5212)
|
||||
|
||||
/* Check whether virtual EOL is supported */
|
||||
#define ath5k_hw_hasveol(_ah) (ah->ah_version != AR5K_AR5210)
|
||||
|
||||
#endif
|
||||
|
|
|
@ -112,51 +112,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/* Main function used by the driver part to check caps */
|
||||
int ath5k_hw_get_capability(struct ath5k_hw *ah,
|
||||
enum ath5k_capability_type cap_type,
|
||||
u32 capability, u32 *result)
|
||||
{
|
||||
switch (cap_type) {
|
||||
case AR5K_CAP_NUM_TXQUEUES:
|
||||
if (result) {
|
||||
if (ah->ah_version == AR5K_AR5210)
|
||||
*result = AR5K_NUM_TX_QUEUES_NOQCU;
|
||||
else
|
||||
*result = AR5K_NUM_TX_QUEUES;
|
||||
goto yes;
|
||||
}
|
||||
case AR5K_CAP_VEOL:
|
||||
goto yes;
|
||||
case AR5K_CAP_COMPRESSION:
|
||||
if (ah->ah_version == AR5K_AR5212)
|
||||
goto yes;
|
||||
else
|
||||
goto no;
|
||||
case AR5K_CAP_BURST:
|
||||
goto yes;
|
||||
case AR5K_CAP_TPC:
|
||||
goto yes;
|
||||
case AR5K_CAP_BSSIDMASK:
|
||||
if (ah->ah_version == AR5K_AR5212)
|
||||
goto yes;
|
||||
else
|
||||
goto no;
|
||||
case AR5K_CAP_XR:
|
||||
if (ah->ah_version == AR5K_AR5212)
|
||||
goto yes;
|
||||
else
|
||||
goto no;
|
||||
default:
|
||||
goto no;
|
||||
}
|
||||
|
||||
no:
|
||||
return -EINVAL;
|
||||
yes:
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* TODO: Following functions should be part of a new function
|
||||
* set_capability
|
||||
|
|
|
@ -157,10 +157,10 @@ static void *reg_next(struct seq_file *seq, void *p, loff_t *pos)
|
|||
|
||||
static int reg_show(struct seq_file *seq, void *p)
|
||||
{
|
||||
struct ath5k_softc *sc = seq->private;
|
||||
struct ath5k_hw *ah = seq->private;
|
||||
struct reg *r = p;
|
||||
seq_printf(seq, "%-25s0x%08x\n", r->name,
|
||||
ath5k_hw_reg_read(sc->ah, r->addr));
|
||||
ath5k_hw_reg_read(ah, r->addr));
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -197,42 +197,41 @@ static const struct file_operations fops_registers = {
|
|||
static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[500];
|
||||
unsigned int len = 0;
|
||||
unsigned int v;
|
||||
u64 tsf;
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_BEACON);
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
|
||||
"AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
|
||||
(v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
|
||||
"AR5K_LAST_TSTP", ath5k_hw_reg_read(sc->ah, AR5K_LAST_TSTP));
|
||||
"AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
|
||||
"AR5K_BEACON_CNT", ath5k_hw_reg_read(sc->ah, AR5K_BEACON_CNT));
|
||||
"AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER0);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
|
||||
"AR5K_TIMER0 (TBTT)", v, v);
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER1);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
|
||||
"AR5K_TIMER1 (DMA)", v, v >> 3);
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER2);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
|
||||
"AR5K_TIMER2 (SWBA)", v, v >> 3);
|
||||
|
||||
v = ath5k_hw_reg_read(sc->ah, AR5K_TIMER3);
|
||||
v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
|
||||
len += snprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
|
||||
"AR5K_TIMER3 (ATIM)", v, v);
|
||||
|
||||
tsf = ath5k_hw_get_tsf64(sc->ah);
|
||||
tsf = ath5k_hw_get_tsf64(ah);
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"TSF\t\t0x%016llx\tTU: %08x\n",
|
||||
(unsigned long long)tsf, TSF_TO_TU(tsf));
|
||||
|
@@ -247,8 +246,7 @@ static ssize_t write_file_beacon(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = sc->ah;
struct ath5k_hw *ah = file->private_data;
char buf[20];

if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
@@ -279,9 +277,9 @@ static ssize_t write_file_reset(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
ieee80211_queue_work(sc->hw, &sc->reset_work);
struct ath5k_hw *ah = file->private_data;
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
return count;
}
@@ -318,23 +316,23 @@ static const struct {
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
unsigned int i;

len += snprintf(buf + len, sizeof(buf) - len,
"DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
"DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);

for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
len += snprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
sc->debug.level & dbg_info[i].level ? '+' : ' ',
ah->debug.level & dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
}
len += snprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
sc->debug.level == dbg_info[i].level ? '+' : ' ',
ah->debug.level == dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);

if (len > sizeof(buf))
@@ -347,7 +345,7 @@ static ssize_t write_file_debug(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
unsigned int i;
char buf[20];

@@ -357,7 +355,7 @@ static ssize_t write_file_debug(struct file *file,
for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
if (strncmp(buf, dbg_info[i].name,
strlen(dbg_info[i].name)) == 0) {
sc->debug.level ^= dbg_info[i].level; /* toggle bit */
ah->debug.level ^= dbg_info[i].level; /* toggle bit */
break;
}
}
@@ -378,33 +376,33 @@ static const struct file_operations fops_debug = {
static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
unsigned int i;
unsigned int v;

len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
sc->ah->ah_ant_mode);
ah->ah_ant_mode);
len += snprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
sc->ah->ah_def_ant);
ah->ah_def_ant);
len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
sc->ah->ah_tx_ant);
ah->ah_tx_ant);

len += snprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
len += snprintf(buf + len, sizeof(buf) - len,
"[antenna %d]\t%d\t%d\n",
i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]);
}
len += snprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);

v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);

v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
@@ -418,25 +416,25 @@ static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
"AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
(v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);

v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
(v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);

v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
(v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);

v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);

v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
len += snprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
len += snprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);

@@ -450,7 +448,7 @@ static ssize_t write_file_antenna(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
unsigned int i;
char buf[20];

@@ -458,18 +456,18 @@ static ssize_t write_file_antenna(struct file *file,
return -EFAULT;

if (strncmp(buf, "diversity", 9) == 0) {
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
printk(KERN_INFO "ath5k debug: enable diversity\n");
} else if (strncmp(buf, "fixed-a", 7) == 0) {
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
} else if (strncmp(buf, "fixed-b", 7) == 0) {
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
printk(KERN_INFO "ath5k debug: fixed antenna B\n");
} else if (strncmp(buf, "clear", 5) == 0) {
for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
sc->stats.antenna_rx[i] = 0;
sc->stats.antenna_tx[i] = 0;
for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
ah->stats.antenna_rx[i] = 0;
ah->stats.antenna_tx[i] = 0;
}
printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
}
@@ -489,13 +487,13 @@ static const struct file_operations fops_antenna = {
static ssize_t read_file_misc(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
u32 filt = ath5k_hw_get_rx_filter(sc->ah);
u32 filt = ath5k_hw_get_rx_filter(ah);

len += snprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
sc->bssidmask);
ah->bssidmask);
len += snprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
filt);
if (filt & AR5K_RX_FILTER_UCAST)
@@ -524,7 +522,7 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
len += snprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");

len += snprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
ath_opmode_to_string(sc->opmode), sc->opmode);
ath_opmode_to_string(ah->opmode), ah->opmode);

if (len > sizeof(buf))
len = sizeof(buf);
@@ -544,8 +542,8 @@ static const struct file_operations fops_misc = {
static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_softc *sc = file->private_data;
struct ath5k_statistics *st = &sc->stats;
struct ath5k_hw *ah = file->private_data;
struct ath5k_statistics *st = &ah->stats;
char buf[700];
unsigned int len = 0;
int i;
@ -621,8 +619,8 @@ static ssize_t write_file_frameerrors(struct file *file,
|
|||
const char __user *userbuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_statistics *st = &sc->stats;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
struct ath5k_statistics *st = &ah->stats;
|
||||
char buf[20];
|
||||
|
||||
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
|
||||
|
@ -660,16 +658,16 @@ static const struct file_operations fops_frameerrors = {
|
|||
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_statistics *st = &sc->stats;
|
||||
struct ath5k_ani_state *as = &sc->ani_state;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
struct ath5k_statistics *st = &ah->stats;
|
||||
struct ath5k_ani_state *as = &ah->ani_state;
|
||||
|
||||
char buf[700];
|
||||
unsigned int len = 0;
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"HW has PHY error counters:\t%s\n",
|
||||
sc->ah->ah_capabilities.cap_has_phyerr_counters ?
|
||||
ah->ah_capabilities.cap_has_phyerr_counters ?
|
||||
"yes" : "no");
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"HW max spur immunity level:\t%d\n",
|
||||
|
@ -718,7 +716,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
|
|||
st->mib_intr);
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"beacon RSSI average:\t%d\n",
|
||||
(int)ewma_read(&sc->ah->ah_beacon_rssi_avg));
|
||||
(int)ewma_read(&ah->ah_beacon_rssi_avg));
|
||||
|
||||
#define CC_PRINT(_struct, _field) \
|
||||
_struct._field, \
|
||||
|
@ -750,14 +748,14 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
|
|||
as->sum_cck_errors);
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
|
||||
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
|
||||
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
|
||||
ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
|
||||
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
|
||||
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
|
||||
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
|
||||
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
|
||||
ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
|
||||
ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
|
||||
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)));
|
||||
|
||||
if (len > sizeof(buf))
|
||||
len = sizeof(buf);
|
||||
|
@ -769,42 +767,42 @@ static ssize_t write_file_ani(struct file *file,
|
|||
const char __user *userbuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[20];
|
||||
|
||||
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
|
||||
return -EFAULT;
|
||||
|
||||
if (strncmp(buf, "sens-low", 8) == 0) {
|
||||
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
|
||||
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
|
||||
} else if (strncmp(buf, "sens-high", 9) == 0) {
|
||||
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
|
||||
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW);
|
||||
} else if (strncmp(buf, "ani-off", 7) == 0) {
|
||||
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
|
||||
ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
|
||||
} else if (strncmp(buf, "ani-on", 6) == 0) {
|
||||
ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
|
||||
ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
|
||||
} else if (strncmp(buf, "noise-low", 9) == 0) {
|
||||
ath5k_ani_set_noise_immunity_level(sc->ah, 0);
|
||||
ath5k_ani_set_noise_immunity_level(ah, 0);
|
||||
} else if (strncmp(buf, "noise-high", 10) == 0) {
|
||||
ath5k_ani_set_noise_immunity_level(sc->ah,
|
||||
ath5k_ani_set_noise_immunity_level(ah,
|
||||
ATH5K_ANI_MAX_NOISE_IMM_LVL);
|
||||
} else if (strncmp(buf, "spur-low", 8) == 0) {
|
||||
ath5k_ani_set_spur_immunity_level(sc->ah, 0);
|
||||
ath5k_ani_set_spur_immunity_level(ah, 0);
|
||||
} else if (strncmp(buf, "spur-high", 9) == 0) {
|
||||
ath5k_ani_set_spur_immunity_level(sc->ah,
|
||||
sc->ani_state.max_spur_level);
|
||||
ath5k_ani_set_spur_immunity_level(ah,
|
||||
ah->ani_state.max_spur_level);
|
||||
} else if (strncmp(buf, "fir-low", 7) == 0) {
|
||||
ath5k_ani_set_firstep_level(sc->ah, 0);
|
||||
ath5k_ani_set_firstep_level(ah, 0);
|
||||
} else if (strncmp(buf, "fir-high", 8) == 0) {
|
||||
ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
|
||||
ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
|
||||
} else if (strncmp(buf, "ofdm-off", 8) == 0) {
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
|
||||
} else if (strncmp(buf, "ofdm-on", 7) == 0) {
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
|
||||
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
|
||||
} else if (strncmp(buf, "cck-off", 7) == 0) {
|
||||
ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
|
||||
ath5k_ani_set_cck_weak_signal_detection(ah, false);
|
||||
} else if (strncmp(buf, "cck-on", 6) == 0) {
|
||||
ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
|
||||
ath5k_ani_set_cck_weak_signal_detection(ah, true);
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
@ -823,7 +821,7 @@ static const struct file_operations fops_ani = {
|
|||
static ssize_t read_file_queue(struct file *file, char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[700];
|
||||
unsigned int len = 0;
|
||||
|
||||
|
@ -832,10 +830,10 @@ static ssize_t read_file_queue(struct file *file, char __user *user_buf,
|
|||
int i, n;
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"available txbuffers: %d\n", sc->txbuf_len);
|
||||
"available txbuffers: %d\n", ah->txbuf_len);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
|
||||
txq = &sc->txqs[i];
|
||||
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
|
||||
txq = &ah->txqs[i];
|
||||
|
||||
len += snprintf(buf + len, sizeof(buf) - len,
|
||||
"%02d: %ssetup\n", i, txq->setup ? "" : "not ");
|
||||
|
@ -865,16 +863,16 @@ static ssize_t write_file_queue(struct file *file,
|
|||
const char __user *userbuf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
struct ath5k_softc *sc = file->private_data;
|
||||
struct ath5k_hw *ah = file->private_data;
|
||||
char buf[20];
|
||||
|
||||
if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
|
||||
return -EFAULT;
|
||||
|
||||
if (strncmp(buf, "start", 5) == 0)
|
||||
ieee80211_wake_queues(sc->hw);
|
||||
ieee80211_wake_queues(ah->hw);
|
||||
else if (strncmp(buf, "stop", 4) == 0)
|
||||
ieee80211_stop_queues(sc->hw);
|
||||
ieee80211_stop_queues(ah->hw);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -890,57 +888,57 @@ static const struct file_operations fops_queue = {
|
|||
|
||||
|
||||
void
|
||||
ath5k_debug_init_device(struct ath5k_softc *sc)
|
||||
ath5k_debug_init_device(struct ath5k_hw *ah)
|
||||
{
|
||||
struct dentry *phydir;
|
||||
|
||||
sc->debug.level = ath5k_debug;
|
||||
ah->debug.level = ath5k_debug;
|
||||
|
||||
phydir = debugfs_create_dir("ath5k", sc->hw->wiphy->debugfsdir);
|
||||
phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir);
|
||||
if (!phydir)
|
||||
return;
|
||||
|
||||
debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, sc,
|
||||
debugfs_create_file("debug", S_IWUSR | S_IRUSR, phydir, ah,
|
||||
&fops_debug);
|
||||
|
||||
debugfs_create_file("registers", S_IRUSR, phydir, sc, &fops_registers);
|
||||
debugfs_create_file("registers", S_IRUSR, phydir, ah, &fops_registers);
|
||||
|
||||
debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, sc,
|
||||
debugfs_create_file("beacon", S_IWUSR | S_IRUSR, phydir, ah,
|
||||
&fops_beacon);
|
||||
|
||||
debugfs_create_file("reset", S_IWUSR, phydir, sc, &fops_reset);
|
||||
debugfs_create_file("reset", S_IWUSR, phydir, ah, &fops_reset);
|
||||
|
||||
debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, sc,
|
||||
debugfs_create_file("antenna", S_IWUSR | S_IRUSR, phydir, ah,
|
||||
&fops_antenna);
|
||||
|
||||
debugfs_create_file("misc", S_IRUSR, phydir, sc, &fops_misc);
|
||||
debugfs_create_file("misc", S_IRUSR, phydir, ah, &fops_misc);
|
||||
|
||||
debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, sc,
|
||||
debugfs_create_file("frameerrors", S_IWUSR | S_IRUSR, phydir, ah,
|
||||
&fops_frameerrors);
|
||||
|
||||
debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, sc, &fops_ani);
|
||||
debugfs_create_file("ani", S_IWUSR | S_IRUSR, phydir, ah, &fops_ani);
|
||||
|
||||
debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, sc,
|
||||
debugfs_create_file("queue", S_IWUSR | S_IRUSR, phydir, ah,
|
||||
&fops_queue);
|
||||
|
||||
debugfs_create_bool("32khz_clock", S_IWUSR | S_IRUSR, phydir,
|
||||
&sc->ah->ah_use_32khz_clock);
|
||||
&ah->ah_use_32khz_clock);
|
||||
}
|
||||
|
||||
/* functions used in other places */
|
||||
|
||||
void
|
||||
ath5k_debug_dump_bands(struct ath5k_softc *sc)
|
||||
ath5k_debug_dump_bands(struct ath5k_hw *ah)
|
||||
{
|
||||
unsigned int b, i;
|
||||
|
||||
if (likely(!(sc->debug.level & ATH5K_DEBUG_DUMPBANDS)))
|
||||
if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
|
||||
return;
|
||||
|
||||
BUG_ON(!sc->sbands);
|
||||
BUG_ON(!ah->sbands);
|
||||
|
||||
for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
|
||||
struct ieee80211_supported_band *band = &sc->sbands[b];
|
||||
struct ieee80211_supported_band *band = &ah->sbands[b];
|
||||
char bname[6];
|
||||
switch (band->band) {
|
||||
case IEEE80211_BAND_2GHZ:
|
||||
|
@ -990,41 +988,41 @@ ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
|
|||
}
|
||||
|
||||
void
|
||||
ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
|
||||
ath5k_debug_printrxbuffs(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_desc *ds;
|
||||
struct ath5k_buf *bf;
|
||||
struct ath5k_rx_status rs = {};
|
||||
int status;
|
||||
|
||||
if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
|
||||
if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
|
||||
return;
|
||||
|
||||
printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
|
||||
ath5k_hw_get_rxdp(ah), sc->rxlink);
|
||||
ath5k_hw_get_rxdp(ah), ah->rxlink);
|
||||
|
||||
spin_lock_bh(&sc->rxbuflock);
|
||||
list_for_each_entry(bf, &sc->rxbuf, list) {
|
||||
spin_lock_bh(&ah->rxbuflock);
|
||||
list_for_each_entry(bf, &ah->rxbuf, list) {
|
||||
ds = bf->desc;
|
||||
status = ah->ah_proc_rx_desc(ah, ds, &rs);
|
||||
if (!status)
|
||||
ath5k_debug_printrxbuf(bf, status == 0, &rs);
|
||||
}
|
||||
spin_unlock_bh(&sc->rxbuflock);
|
||||
spin_unlock_bh(&ah->rxbuflock);
|
||||
}
|
||||
|
||||
void
|
||||
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
|
||||
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf)
|
||||
{
|
||||
struct ath5k_desc *ds = bf->desc;
|
||||
struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212;
|
||||
struct ath5k_tx_status ts = {};
|
||||
int done;
|
||||
|
||||
if (likely(!(sc->debug.level & ATH5K_DEBUG_DESC)))
|
||||
if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
|
||||
return;
|
||||
|
||||
done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts);
|
||||
done = ah->ah_proc_tx_desc(ah, bf->desc, &ts);
|
||||
|
||||
printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x "
|
||||
"%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link,
|
||||
|
|
|
@ -61,7 +61,6 @@
|
|||
#ifndef _ATH5K_DEBUG_H
|
||||
#define _ATH5K_DEBUG_H
|
||||
|
||||
struct ath5k_softc;
|
||||
struct ath5k_hw;
|
||||
struct sk_buff;
|
||||
struct ath5k_buf;
|
||||
|
@ -127,39 +126,39 @@ enum ath5k_debug_level {
|
|||
} while (0)
|
||||
|
||||
void
|
||||
ath5k_debug_init_device(struct ath5k_softc *sc);
|
||||
ath5k_debug_init_device(struct ath5k_hw *ah);
|
||||
|
||||
void
|
||||
ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah);
|
||||
ath5k_debug_printrxbuffs(struct ath5k_hw *ah);
|
||||
|
||||
void
|
||||
ath5k_debug_dump_bands(struct ath5k_softc *sc);
|
||||
ath5k_debug_dump_bands(struct ath5k_hw *ah);
|
||||
|
||||
void
|
||||
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
|
||||
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf);
|
||||
|
||||
#else /* no debugging */
|
||||
|
||||
#include <linux/compiler.h>
|
||||
|
||||
static inline void __attribute__ ((format (printf, 3, 4)))
|
||||
ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {}
|
||||
ATH5K_DBG(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...) {}
|
||||
|
||||
static inline void __attribute__ ((format (printf, 3, 4)))
|
||||
ATH5K_DBG_UNLIMIT(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...)
|
||||
ATH5K_DBG_UNLIMIT(struct ath5k_hw *ah, unsigned int m, const char *fmt, ...)
|
||||
{}
|
||||
|
||||
static inline void
|
||||
ath5k_debug_init_device(struct ath5k_softc *sc) {}
|
||||
ath5k_debug_init_device(struct ath5k_hw *ah) {}
|
||||
|
||||
static inline void
|
||||
ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {}
|
||||
ath5k_debug_printrxbuffs(struct ath5k_hw *ah) {}
|
||||
|
||||
static inline void
|
||||
ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
|
||||
ath5k_debug_dump_bands(struct ath5k_hw *ah) {}
|
||||
|
||||
static inline void
|
||||
ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
|
||||
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf) {}
|
||||
|
||||
#endif /* ifdef CONFIG_ATH5K_DEBUG */
|
||||
|
||||
|
|
|
@ -55,12 +55,12 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
|
|||
* noise on the channel, so it is important to avoid this.
|
||||
*/
|
||||
if (unlikely(tx_tries0 == 0)) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero retries\n");
|
||||
ATH5K_ERR(ah, "zero retries\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (unlikely(tx_rate0 == 0)) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero rate\n");
|
||||
ATH5K_ERR(ah, "zero rate\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -203,12 +203,12 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
|
|||
* noise on the channel, so it is important to avoid this.
|
||||
*/
|
||||
if (unlikely(tx_tries0 == 0)) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero retries\n");
|
||||
ATH5K_ERR(ah, "zero retries\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (unlikely(tx_rate0 == 0)) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero rate\n");
|
||||
ATH5K_ERR(ah, "zero rate\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -316,7 +316,7 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
|
|||
if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
|
||||
(tx_rate2 == 0 && tx_tries2 != 0) ||
|
||||
(tx_rate3 == 0 && tx_tries3 != 0))) {
|
||||
ATH5K_ERR(ah->ah_sc, "zero rate\n");
|
||||
ATH5K_ERR(ah, "zero rate\n");
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -73,7 +73,7 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
|
|||
udelay(100);
|
||||
|
||||
if (!i)
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"failed to stop RX DMA !\n");
|
||||
|
||||
return i ? 0 : -EBUSY;
|
||||
|
@ -100,7 +100,7 @@ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
|
|||
int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
|
||||
{
|
||||
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"tried to set RXDP while rx was active !\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
@ -243,7 +243,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
|
|||
udelay(100);
|
||||
|
||||
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"queue %i didn't stop !\n", queue);
|
||||
|
||||
/* Check for pending frames */
|
||||
|
@ -295,7 +295,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
|
|||
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
|
||||
|
||||
if (pending)
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"quiet mechanism didn't work q:%i !\n",
|
||||
queue);
|
||||
}
|
||||
|
@ -309,7 +309,7 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
|
|||
/* Clear register */
|
||||
ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
|
||||
if (pending) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"tx dma didn't stop (q:%i, frm:%i) !\n",
|
||||
queue, pending);
|
||||
return -EBUSY;
|
||||
|
@ -333,7 +333,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
|
|||
int ret;
|
||||
ret = ath5k_hw_stop_tx_dma(ah, queue);
|
||||
if (ret) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
|
||||
"beacon queue didn't stop !\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
|
|
@ -105,7 +105,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
|
|||
* big still, waiting on a better value.
|
||||
*/
|
||||
if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
|
||||
ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
|
||||
ATH5K_ERR(ah, "Invalid max custom EEPROM size: "
|
||||
"%d (0x%04x) max expected: %d (0x%04x)\n",
|
||||
eep_max, eep_max,
|
||||
3 * AR5K_EEPROM_INFO_MAX,
|
||||
|
@ -119,7 +119,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
|
|||
cksum ^= val;
|
||||
}
|
||||
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
|
||||
ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
|
||||
ATH5K_ERR(ah, "Invalid EEPROM "
|
||||
"checksum: 0x%04x eep_max: 0x%04x (%s)\n",
|
||||
cksum, eep_max,
|
||||
eep_max == AR5K_EEPROM_INFO_MAX ?
|
||||
|
|
|
@ -1542,7 +1542,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
|
|||
|
||||
/* AR5K_MODE_11B */
|
||||
if (mode > 2) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"unsupported channel mode: %d\n", mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -86,26 +86,26 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_led_devices) = {
|
|||
{ }
|
||||
};
|
||||
|
||||
void ath5k_led_enable(struct ath5k_softc *sc)
|
||||
void ath5k_led_enable(struct ath5k_hw *ah)
|
||||
{
|
||||
if (test_bit(ATH_STAT_LEDSOFT, sc->status)) {
|
||||
ath5k_hw_set_gpio_output(sc->ah, sc->led_pin);
|
||||
ath5k_led_off(sc);
|
||||
if (test_bit(ATH_STAT_LEDSOFT, ah->status)) {
|
||||
ath5k_hw_set_gpio_output(ah, ah->led_pin);
|
||||
ath5k_led_off(ah);
|
||||
}
|
||||
}
|
||||
|
||||
static void ath5k_led_on(struct ath5k_softc *sc)
|
||||
static void ath5k_led_on(struct ath5k_hw *ah)
|
||||
{
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
|
||||
return;
|
||||
ath5k_hw_set_gpio(sc->ah, sc->led_pin, sc->led_on);
|
||||
ath5k_hw_set_gpio(ah, ah->led_pin, ah->led_on);
|
||||
}
|
||||
|
||||
void ath5k_led_off(struct ath5k_softc *sc)
|
||||
void ath5k_led_off(struct ath5k_hw *ah)
|
||||
{
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
|
||||
return;
|
||||
ath5k_hw_set_gpio(sc->ah, sc->led_pin, !sc->led_on);
|
||||
ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -116,27 +116,27 @@ ath5k_led_brightness_set(struct led_classdev *led_dev,
|
|||
led_dev);
|
||||
|
||||
if (brightness == LED_OFF)
|
||||
ath5k_led_off(led->sc);
|
||||
ath5k_led_off(led->ah);
|
||||
else
|
||||
ath5k_led_on(led->sc);
|
||||
ath5k_led_on(led->ah);
|
||||
}
|
||||
|
||||
static int
|
||||
ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
|
||||
ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
|
||||
const char *name, char *trigger)
|
||||
{
|
||||
int err;
|
||||
|
||||
led->sc = sc;
|
||||
led->ah = ah;
|
||||
strncpy(led->name, name, sizeof(led->name));
|
||||
led->led_dev.name = led->name;
|
||||
led->led_dev.default_trigger = trigger;
|
||||
led->led_dev.brightness_set = ath5k_led_brightness_set;
|
||||
|
||||
err = led_classdev_register(sc->dev, &led->led_dev);
|
||||
err = led_classdev_register(ah->dev, &led->led_dev);
|
||||
if (err) {
|
||||
ATH5K_WARN(sc, "could not register LED %s\n", name);
|
||||
led->sc = NULL;
|
||||
ATH5K_WARN(ah, "could not register LED %s\n", name);
|
||||
led->ah = NULL;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
@ -144,30 +144,30 @@ ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
|
|||
static void
|
||||
ath5k_unregister_led(struct ath5k_led *led)
|
||||
{
|
||||
if (!led->sc)
|
||||
if (!led->ah)
|
||||
return;
|
||||
led_classdev_unregister(&led->led_dev);
|
||||
ath5k_led_off(led->sc);
|
||||
led->sc = NULL;
|
||||
ath5k_led_off(led->ah);
|
||||
led->ah = NULL;
|
||||
}
|
||||
|
||||
void ath5k_unregister_leds(struct ath5k_softc *sc)
|
||||
void ath5k_unregister_leds(struct ath5k_hw *ah)
|
||||
{
|
||||
ath5k_unregister_led(&sc->rx_led);
|
||||
ath5k_unregister_led(&sc->tx_led);
|
||||
ath5k_unregister_led(&ah->rx_led);
|
||||
ath5k_unregister_led(&ah->tx_led);
|
||||
}
|
||||
|
||||
int __devinit ath5k_init_leds(struct ath5k_softc *sc)
|
||||
int __devinit ath5k_init_leds(struct ath5k_hw *ah)
|
||||
{
|
||||
int ret = 0;
|
||||
struct ieee80211_hw *hw = sc->hw;
|
||||
struct ieee80211_hw *hw = ah->hw;
|
||||
#ifndef CONFIG_ATHEROS_AR231X
|
||||
struct pci_dev *pdev = sc->pdev;
|
||||
struct pci_dev *pdev = ah->pdev;
|
||||
#endif
|
||||
char name[ATH5K_LED_MAX_NAME_LEN + 1];
|
||||
const struct pci_device_id *match;
|
||||
|
||||
if (!sc->pdev)
|
||||
if (!ah->pdev)
|
||||
return 0;
|
||||
|
||||
#ifdef CONFIG_ATHEROS_AR231X
|
||||
|
@ -176,24 +176,24 @@ int __devinit ath5k_init_leds(struct ath5k_softc *sc)
|
|||
match = pci_match_id(&ath5k_led_devices[0], pdev);
|
||||
#endif
|
||||
if (match) {
|
||||
__set_bit(ATH_STAT_LEDSOFT, sc->status);
|
||||
sc->led_pin = ATH_PIN(match->driver_data);
|
||||
sc->led_on = ATH_POLARITY(match->driver_data);
|
||||
__set_bit(ATH_STAT_LEDSOFT, ah->status);
|
||||
ah->led_pin = ATH_PIN(match->driver_data);
|
||||
ah->led_on = ATH_POLARITY(match->driver_data);
|
||||
}
|
||||
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, sc->status))
|
||||
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
|
||||
goto out;
|
||||
|
||||
ath5k_led_enable(sc);
|
||||
ath5k_led_enable(ah);
|
||||
|
||||
snprintf(name, sizeof(name), "ath5k-%s::rx", wiphy_name(hw->wiphy));
|
||||
ret = ath5k_register_led(sc, &sc->rx_led, name,
|
||||
ret = ath5k_register_led(ah, &ah->rx_led, name,
|
||||
ieee80211_get_rx_led_name(hw));
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
snprintf(name, sizeof(name), "ath5k-%s::tx", wiphy_name(hw->wiphy));
|
||||
ret = ath5k_register_led(sc, &sc->tx_led, name,
|
||||
ret = ath5k_register_led(ah, &ah->tx_led, name,
|
||||
ieee80211_get_tx_led_name(hw));
|
||||
out:
|
||||
return ret;
|
||||
|
|
|
@ -53,44 +53,30 @@
|
|||
static void
|
||||
ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
u16 qnum = skb_get_queue_mapping(skb);
|
||||
|
||||
if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
|
||||
if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
|
||||
dev_kfree_skb_any(skb);
|
||||
return;
|
||||
}
|
||||
|
||||
ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
ath5k_start(struct ieee80211_hw *hw)
|
||||
{
|
||||
return ath5k_init_hw(hw->priv);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
ath5k_stop(struct ieee80211_hw *hw)
|
||||
{
|
||||
ath5k_stop_hw(hw->priv);
|
||||
ath5k_tx_queue(hw, skb, &ah->txqs[qnum]);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
int ret;
|
||||
struct ath5k_vif *avf = (void *)vif->drv_priv;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
if ((vif->type == NL80211_IFTYPE_AP ||
|
||||
vif->type == NL80211_IFTYPE_ADHOC)
|
||||
&& (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
|
||||
&& (ah->num_ap_vifs + ah->num_adhoc_vifs) >= ATH_BCBUF) {
|
||||
ret = -ELNRNG;
|
||||
goto end;
|
||||
}
|
||||
|
@ -100,9 +86,9 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
|||
* We would need to operate the HW in ad-hoc mode to allow TSF updates
|
||||
* for the IBSS, but this breaks with additional AP or STA interfaces
|
||||
* at the moment. */
|
||||
if (sc->num_adhoc_vifs ||
|
||||
(sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
|
||||
ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
|
||||
if (ah->num_adhoc_vifs ||
|
||||
(ah->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
|
||||
ATH5K_ERR(ah, "Only one single ad-hoc interface is allowed.\n");
|
||||
ret = -ELNRNG;
|
||||
goto end;
|
||||
}
|
||||
|
@ -119,8 +105,8 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
|||
goto end;
|
||||
}
|
||||
|
||||
sc->nvifs++;
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
|
||||
ah->nvifs++;
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
|
||||
|
||||
/* Assign the vap/adhoc to a beacon xmit slot. */
|
||||
if ((avf->opmode == NL80211_IFTYPE_AP) ||
|
||||
|
@ -128,38 +114,38 @@ ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
|
|||
(avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
|
||||
int slot;
|
||||
|
||||
WARN_ON(list_empty(&sc->bcbuf));
|
||||
avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
|
||||
WARN_ON(list_empty(&ah->bcbuf));
|
||||
avf->bbuf = list_first_entry(&ah->bcbuf, struct ath5k_buf,
|
||||
list);
|
||||
list_del(&avf->bbuf->list);
|
||||
|
||||
avf->bslot = 0;
|
||||
for (slot = 0; slot < ATH_BCBUF; slot++) {
|
||||
if (!sc->bslot[slot]) {
|
||||
if (!ah->bslot[slot]) {
|
||||
avf->bslot = slot;
|
||||
break;
|
||||
}
|
||||
}
|
||||
BUG_ON(sc->bslot[avf->bslot] != NULL);
|
||||
sc->bslot[avf->bslot] = vif;
|
||||
BUG_ON(ah->bslot[avf->bslot] != NULL);
|
||||
ah->bslot[avf->bslot] = vif;
|
||||
if (avf->opmode == NL80211_IFTYPE_AP)
|
||||
sc->num_ap_vifs++;
|
||||
ah->num_ap_vifs++;
|
||||
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
|
||||
sc->num_adhoc_vifs++;
|
||||
ah->num_adhoc_vifs++;
|
||||
}
|
||||
|
||||
/* Any MAC address is fine, all others are included through the
|
||||
* filter.
|
||||
*/
|
||||
memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
|
||||
ath5k_hw_set_lladdr(sc->ah, vif->addr);
|
||||
memcpy(&ah->lladdr, vif->addr, ETH_ALEN);
|
||||
ath5k_hw_set_lladdr(ah, vif->addr);
|
||||
|
||||
memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
|
||||
|
||||
ath5k_update_bssid_mask_and_opmode(sc, vif);
|
||||
ath5k_update_bssid_mask_and_opmode(ah, vif);
|
||||
ret = 0;
|
||||
end:
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -168,31 +154,31 @@ static void
|
|||
ath5k_remove_interface(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ath5k_vif *avf = (void *)vif->drv_priv;
|
||||
unsigned int i;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
sc->nvifs--;
|
||||
mutex_lock(&ah->lock);
|
||||
ah->nvifs--;
|
||||
|
||||
if (avf->bbuf) {
|
||||
ath5k_txbuf_free_skb(sc, avf->bbuf);
|
||||
list_add_tail(&avf->bbuf->list, &sc->bcbuf);
|
||||
ath5k_txbuf_free_skb(ah, avf->bbuf);
|
||||
list_add_tail(&avf->bbuf->list, &ah->bcbuf);
|
||||
for (i = 0; i < ATH_BCBUF; i++) {
|
||||
if (sc->bslot[i] == vif) {
|
||||
sc->bslot[i] = NULL;
|
||||
if (ah->bslot[i] == vif) {
|
||||
ah->bslot[i] = NULL;
|
||||
break;
|
||||
}
|
||||
}
|
||||
avf->bbuf = NULL;
|
||||
}
|
||||
if (avf->opmode == NL80211_IFTYPE_AP)
|
||||
sc->num_ap_vifs--;
|
||||
ah->num_ap_vifs--;
|
||||
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
|
||||
sc->num_adhoc_vifs--;
|
||||
ah->num_adhoc_vifs--;
|
||||
|
||||
ath5k_update_bssid_mask_and_opmode(sc, NULL);
|
||||
mutex_unlock(&sc->lock);
|
||||
ath5k_update_bssid_mask_and_opmode(ah, NULL);
|
||||
mutex_unlock(&ah->lock);
|
||||
}
|
||||
|
||||
|
||||
|
@ -202,23 +188,22 @@ ath5k_remove_interface(struct ieee80211_hw *hw,
|
|||
static int
|
||||
ath5k_config(struct ieee80211_hw *hw, u32 changed)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ieee80211_conf *conf = &hw->conf;
|
||||
int ret = 0;
|
||||
int i;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
|
||||
ret = ath5k_chan_set(sc, conf->channel);
|
||||
ret = ath5k_chan_set(ah, conf->channel);
|
||||
if (ret < 0)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
|
||||
(sc->power_level != conf->power_level)) {
|
||||
sc->power_level = conf->power_level;
|
||||
(ah->power_level != conf->power_level)) {
|
||||
ah->power_level = conf->power_level;
|
||||
|
||||
/* Half dB steps */
|
||||
ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
|
||||
|
@ -252,7 +237,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
|
|||
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -262,12 +247,11 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
struct ieee80211_bss_conf *bss_conf, u32 changes)
|
||||
{
|
||||
struct ath5k_vif *avf = (void *)vif->drv_priv;
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
unsigned long flags;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
if (changes & BSS_CHANGED_BSSID) {
|
||||
/* Cache for later use during resets */
|
||||
|
@ -278,7 +262,7 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
}
|
||||
|
||||
if (changes & BSS_CHANGED_BEACON_INT)
|
||||
sc->bintval = bss_conf->beacon_int;
|
||||
ah->bintval = bss_conf->beacon_int;
|
||||
|
||||
if (changes & BSS_CHANGED_ERP_SLOT) {
|
||||
int slot_time;
|
||||
|
@ -292,16 +276,16 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
if (changes & BSS_CHANGED_ASSOC) {
|
||||
avf->assoc = bss_conf->assoc;
|
||||
if (bss_conf->assoc)
|
||||
sc->assoc = bss_conf->assoc;
|
||||
ah->assoc = bss_conf->assoc;
|
||||
else
|
||||
sc->assoc = ath5k_any_vif_assoc(sc);
|
||||
ah->assoc = ath5k_any_vif_assoc(ah);
|
||||
|
||||
if (sc->opmode == NL80211_IFTYPE_STATION)
|
||||
ath5k_set_beacon_filter(hw, sc->assoc);
|
||||
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
|
||||
if (ah->opmode == NL80211_IFTYPE_STATION)
|
||||
ath5k_set_beacon_filter(hw, ah->assoc);
|
||||
ath5k_hw_set_ledstate(ah, ah->assoc ?
|
||||
AR5K_LED_ASSOC : AR5K_LED_INIT);
|
||||
if (bss_conf->assoc) {
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
|
||||
"Bss Info ASSOC %d, bssid: %pM\n",
|
||||
bss_conf->aid, common->curbssid);
|
||||
common->curaid = bss_conf->aid;
|
||||
|
@ -311,19 +295,19 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
|
|||
}
|
||||
|
||||
if (changes & BSS_CHANGED_BEACON) {
|
||||
spin_lock_irqsave(&sc->block, flags);
|
||||
spin_lock_irqsave(&ah->block, flags);
|
||||
ath5k_beacon_update(hw, vif);
|
||||
spin_unlock_irqrestore(&sc->block, flags);
|
||||
spin_unlock_irqrestore(&ah->block, flags);
|
||||
}
|
||||
|
||||
if (changes & BSS_CHANGED_BEACON_ENABLED)
|
||||
sc->enable_beacon = bss_conf->enable_beacon;
|
||||
ah->enable_beacon = bss_conf->enable_beacon;
|
||||
|
||||
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
|
||||
BSS_CHANGED_BEACON_INT))
|
||||
ath5k_beacon_config(sc);
|
||||
ath5k_beacon_config(ah);
|
||||
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
}
|
||||
|
||||
|
||||
|
@ -384,12 +368,11 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
|||
FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
|
||||
FIF_BCN_PRBRESP_PROMISC)
|
||||
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
u32 mfilt[2], rfilt;
|
||||
struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
mfilt[0] = multicast;
|
||||
mfilt[1] = multicast >> 32;
|
||||
|
@ -407,12 +390,12 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
|||
|
||||
if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
|
||||
if (*new_flags & FIF_PROMISC_IN_BSS)
|
||||
__set_bit(ATH_STAT_PROMISC, sc->status);
|
||||
__set_bit(ATH_STAT_PROMISC, ah->status);
|
||||
else
|
||||
__clear_bit(ATH_STAT_PROMISC, sc->status);
|
||||
__clear_bit(ATH_STAT_PROMISC, ah->status);
|
||||
}
|
||||
|
||||
if (test_bit(ATH_STAT_PROMISC, sc->status))
|
||||
if (test_bit(ATH_STAT_PROMISC, ah->status))
|
||||
rfilt |= AR5K_RX_FILTER_PROM;
|
||||
|
||||
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
|
||||
|
@ -427,7 +410,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
|||
|
||||
/* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
|
||||
* and probes for any BSSID */
|
||||
if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
|
||||
if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
|
||||
rfilt |= AR5K_RX_FILTER_BEACON;
|
||||
|
||||
/* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
|
||||
|
@ -442,7 +425,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
|||
|
||||
/* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
|
||||
|
||||
switch (sc->opmode) {
|
||||
switch (ah->opmode) {
|
||||
case NL80211_IFTYPE_MESH_POINT:
|
||||
rfilt |= AR5K_RX_FILTER_CONTROL |
|
||||
AR5K_RX_FILTER_BEACON |
|
||||
|
@ -455,7 +438,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
|||
AR5K_RX_FILTER_BEACON;
|
||||
break;
|
||||
case NL80211_IFTYPE_STATION:
|
||||
if (sc->assoc)
|
||||
if (ah->assoc)
|
||||
rfilt |= AR5K_RX_FILTER_BEACON;
|
||||
default:
|
||||
break;
|
||||
|
@ -464,7 +447,7 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
|||
iter_data.hw_macaddr = NULL;
|
||||
iter_data.n_stas = 0;
|
||||
iter_data.need_set_hw_addr = false;
|
||||
ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter,
|
||||
ieee80211_iterate_active_interfaces_atomic(ah->hw, ath5k_vif_iter,
|
||||
&iter_data);
|
||||
|
||||
/* Set up RX Filter */
|
||||
|
@ -483,9 +466,9 @@ ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
|
|||
ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
|
||||
/* Set the cached hw filter flags, this will later actually
|
||||
* be set in HW */
|
||||
sc->filter_flags = rfilt;
|
||||
ah->filter_flags = rfilt;
|
||||
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
}
|
||||
|
||||
|
||||
|
@ -494,8 +477,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
|
||||
struct ieee80211_key_conf *key)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
int ret = 0;
|
||||
|
||||
|
@ -516,7 +498,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
switch (cmd) {
|
||||
case SET_KEY:
|
||||
|
@ -540,7 +522,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
}
|
||||
|
||||
mmiowb();
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -548,17 +530,17 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
|
|||
static void
|
||||
ath5k_sw_scan_start(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
if (!sc->assoc)
|
||||
ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
if (!ah->assoc)
|
||||
ath5k_hw_set_ledstate(ah, AR5K_LED_SCAN);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
ath5k_sw_scan_complete(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
ath5k_hw_set_ledstate(ah, ah->assoc ?
|
||||
AR5K_LED_ASSOC : AR5K_LED_INIT);
|
||||
}
|
||||
|
||||
|
@ -567,15 +549,15 @@ static int
|
|||
ath5k_get_stats(struct ieee80211_hw *hw,
|
||||
struct ieee80211_low_level_stats *stats)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
/* Force update */
|
||||
ath5k_hw_update_mib_counters(sc->ah);
|
||||
ath5k_hw_update_mib_counters(ah);
|
||||
|
||||
stats->dot11ACKFailureCount = sc->stats.ack_fail;
|
||||
stats->dot11RTSFailureCount = sc->stats.rts_fail;
|
||||
stats->dot11RTSSuccessCount = sc->stats.rts_ok;
|
||||
stats->dot11FCSErrorCount = sc->stats.fcs_error;
|
||||
stats->dot11ACKFailureCount = ah->stats.ack_fail;
|
||||
stats->dot11RTSFailureCount = ah->stats.rts_fail;
|
||||
stats->dot11RTSSuccessCount = ah->stats.rts_ok;
|
||||
stats->dot11FCSErrorCount = ah->stats.fcs_error;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -585,15 +567,14 @@ static int
|
|||
ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
||||
const struct ieee80211_tx_queue_params *params)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ath5k_txq_info qi;
|
||||
int ret = 0;
|
||||
|
||||
if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
|
||||
ath5k_hw_get_tx_queueprops(ah, queue, &qi);
|
||||
|
||||
|
@ -602,20 +583,20 @@ ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
|||
qi.tqi_cw_max = params->cw_max;
|
||||
qi.tqi_burst_time = params->txop;
|
||||
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
|
||||
"Configure tx [queue %d], "
|
||||
"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
|
||||
queue, params->aifs, params->cw_min,
|
||||
params->cw_max, params->txop);
|
||||
|
||||
if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
|
||||
ATH5K_ERR(sc,
|
||||
ATH5K_ERR(ah,
|
||||
"Unable to update hardware queue %u!\n", queue);
|
||||
ret = -EIO;
|
||||
} else
|
||||
ath5k_hw_reset_tx_queue(ah, queue);
|
||||
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_unlock(&ah->lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -624,43 +605,43 @@ ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
|
|||
static u64
|
||||
ath5k_get_tsf(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
return ath5k_hw_get_tsf64(sc->ah);
|
||||
return ath5k_hw_get_tsf64(ah);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
ath5k_hw_set_tsf64(sc->ah, tsf);
|
||||
ath5k_hw_set_tsf64(ah, tsf);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
ath5k_reset_tsf(struct ieee80211_hw *hw)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
/*
|
||||
* in IBSS mode we need to update the beacon timers too.
|
||||
* this will also reset the TSF if we call it with 0
|
||||
*/
|
||||
if (sc->opmode == NL80211_IFTYPE_ADHOC)
|
||||
ath5k_beacon_update_timers(sc, 0);
|
||||
if (ah->opmode == NL80211_IFTYPE_ADHOC)
|
||||
ath5k_beacon_update_timers(ah, 0);
|
||||
else
|
||||
ath5k_hw_reset_tsf(sc->ah);
|
||||
ath5k_hw_reset_tsf(ah);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
struct ieee80211_conf *conf = &hw->conf;
|
||||
struct ath_common *common = ath5k_hw_common(sc->ah);
|
||||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
struct ath_cycle_counters *cc = &common->cc_survey;
|
||||
unsigned int div = common->clockrate * 1000;
|
||||
|
||||
|
@ -670,18 +651,18 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
|
|||
spin_lock_bh(&common->cc_lock);
|
||||
ath_hw_cycle_counters_update(common);
|
||||
if (cc->cycles > 0) {
|
||||
sc->survey.channel_time += cc->cycles / div;
|
||||
sc->survey.channel_time_busy += cc->rx_busy / div;
|
||||
sc->survey.channel_time_rx += cc->rx_frame / div;
|
||||
sc->survey.channel_time_tx += cc->tx_frame / div;
|
||||
ah->survey.channel_time += cc->cycles / div;
|
||||
ah->survey.channel_time_busy += cc->rx_busy / div;
|
||||
ah->survey.channel_time_rx += cc->rx_frame / div;
|
||||
ah->survey.channel_time_tx += cc->tx_frame / div;
|
||||
}
|
||||
memset(cc, 0, sizeof(*cc));
|
||||
spin_unlock_bh(&common->cc_lock);
|
||||
|
||||
memcpy(survey, &sc->survey, sizeof(*survey));
|
||||
memcpy(survey, &ah->survey, sizeof(*survey));
|
||||
|
||||
survey->channel = conf->channel;
|
||||
survey->noise = sc->ah->ah_noise_floor;
|
||||
survey->noise = ah->ah_noise_floor;
|
||||
survey->filled = SURVEY_INFO_NOISE_DBM |
|
||||
SURVEY_INFO_CHANNEL_TIME |
|
||||
SURVEY_INFO_CHANNEL_TIME_BUSY |
|
||||
|
@ -705,25 +686,25 @@ ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
|
|||
static void
|
||||
ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
mutex_lock(&sc->lock);
|
||||
ath5k_hw_set_coverage_class(sc->ah, coverage_class);
|
||||
mutex_unlock(&sc->lock);
|
||||
mutex_lock(&ah->lock);
|
||||
ath5k_hw_set_coverage_class(ah, coverage_class);
|
||||
mutex_unlock(&ah->lock);
|
||||
}
|
||||
|
||||
|
||||
static int
|
||||
ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
if (tx_ant == 1 && rx_ant == 1)
|
||||
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
|
||||
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
|
||||
else if (tx_ant == 2 && rx_ant == 2)
|
||||
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
|
||||
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
|
||||
else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
|
||||
ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
|
||||
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
|
||||
else
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
|
@ -733,9 +714,9 @@ ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
|
|||
static int
|
||||
ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
switch (sc->ah->ah_ant_mode) {
|
||||
switch (ah->ah_ant_mode) {
|
||||
case AR5K_ANTMODE_FIXED_A:
|
||||
*tx_ant = 1; *rx_ant = 1; break;
|
||||
case AR5K_ANTMODE_FIXED_B:
|
||||
|
@ -750,9 +731,9 @@ ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
|
|||
static void ath5k_get_ringparam(struct ieee80211_hw *hw,
|
||||
u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
*tx = sc->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
|
||||
*tx = ah->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
|
||||
|
||||
*tx_max = ATH5K_TXQ_LEN_MAX;
|
||||
*rx = *rx_max = ATH_RXBUF;
|
||||
|
@ -761,7 +742,7 @@ static void ath5k_get_ringparam(struct ieee80211_hw *hw,
|
|||
|
||||
static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
|
||||
{
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
u16 qnum;
|
||||
|
||||
/* only support setting tx ring size for now */
|
||||
|
@ -772,16 +753,16 @@ static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
|
|||
if (!tx || tx > ATH5K_TXQ_LEN_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
for (qnum = 0; qnum < ARRAY_SIZE(sc->txqs); qnum++) {
|
||||
if (!sc->txqs[qnum].setup)
|
||||
for (qnum = 0; qnum < ARRAY_SIZE(ah->txqs); qnum++) {
|
||||
if (!ah->txqs[qnum].setup)
|
||||
continue;
|
||||
if (sc->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
|
||||
sc->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
|
||||
if (ah->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
|
||||
ah->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
|
||||
continue;
|
||||
|
||||
sc->txqs[qnum].txq_max = tx;
|
||||
if (sc->txqs[qnum].txq_len >= sc->txqs[qnum].txq_max)
|
||||
ieee80211_stop_queue(hw, sc->txqs[qnum].qnum);
|
||||
ah->txqs[qnum].txq_max = tx;
|
||||
if (ah->txqs[qnum].txq_len >= ah->txqs[qnum].txq_max)
|
||||
ieee80211_stop_queue(hw, ah->txqs[qnum].qnum);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -51,10 +51,10 @@ MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
|
|||
/* return bus cachesize in 4B word units */
|
||||
static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
|
||||
{
|
||||
struct ath5k_softc *sc = (struct ath5k_softc *) common->priv;
|
||||
struct ath5k_hw *ah = (struct ath5k_hw *) common->priv;
|
||||
u8 u8tmp;
|
||||
|
||||
pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
|
||||
pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
|
||||
*csz = (int)u8tmp;
|
||||
|
||||
/*
|
||||
|
@ -156,7 +156,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
|
|||
const struct pci_device_id *id)
|
||||
{
|
||||
void __iomem *mem;
|
||||
struct ath5k_softc *sc;
|
||||
struct ath5k_hw *ah;
|
||||
struct ieee80211_hw *hw;
|
||||
int ret;
|
||||
u8 csz;
|
||||
|
@ -243,7 +243,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
|
|||
* Allocate hw (mac80211 main struct)
|
||||
* and hw->priv (driver private data)
|
||||
*/
|
||||
hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
|
||||
hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
|
||||
if (hw == NULL) {
|
||||
dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
|
||||
ret = -ENOMEM;
|
||||
|
@ -252,16 +252,16 @@ ath5k_pci_probe(struct pci_dev *pdev,
|
|||
|
||||
dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
|
||||
|
||||
sc = hw->priv;
|
||||
sc->hw = hw;
|
||||
sc->pdev = pdev;
|
||||
sc->dev = &pdev->dev;
|
||||
sc->irq = pdev->irq;
|
||||
sc->devid = id->device;
|
||||
sc->iobase = mem; /* So we can unmap it on detach */
|
||||
ah = hw->priv;
|
||||
ah->hw = hw;
|
||||
ah->pdev = pdev;
|
||||
ah->dev = &pdev->dev;
|
||||
ah->irq = pdev->irq;
|
||||
ah->devid = id->device;
|
||||
ah->iobase = mem; /* So we can unmap it on detach */
|
||||
|
||||
/* Initialize */
|
||||
ret = ath5k_init_softc(sc, &ath_pci_bus_ops);
|
||||
ret = ath5k_init_softc(ah, &ath_pci_bus_ops);
|
||||
if (ret)
|
||||
goto err_free;
|
||||
|
||||
|
@ -285,10 +285,10 @@ static void __devexit
|
|||
ath5k_pci_remove(struct pci_dev *pdev)
|
||||
{
|
||||
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
ath5k_deinit_softc(sc);
|
||||
pci_iounmap(pdev, sc->iobase);
|
||||
ath5k_deinit_softc(ah);
|
||||
pci_iounmap(pdev, ah->iobase);
|
||||
pci_release_region(pdev, 0);
|
||||
pci_disable_device(pdev);
|
||||
ieee80211_free_hw(hw);
|
||||
|
@ -299,9 +299,9 @@ static int ath5k_pci_suspend(struct device *dev)
|
|||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
ath5k_led_off(sc);
|
||||
ath5k_led_off(ah);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -309,7 +309,7 @@ static int ath5k_pci_resume(struct device *dev)
|
|||
{
|
||||
struct pci_dev *pdev = to_pci_dev(dev);
|
||||
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
|
||||
struct ath5k_softc *sc = hw->priv;
|
||||
struct ath5k_hw *ah = hw->priv;
|
||||
|
||||
/*
|
||||
* Suspend/Resume resets the PCI configuration space, so we have to
|
||||
|
@ -318,7 +318,7 @@ static int ath5k_pci_resume(struct device *dev)
|
|||
*/
|
||||
pci_write_config_byte(pdev, 0x41, 0);
|
||||
|
||||
ath5k_led_enable(sc);
|
||||
ath5k_led_enable(ah);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -77,14 +77,13 @@ static const unsigned int ack_rates_high[] =
|
|||
int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
|
||||
int len, struct ieee80211_rate *rate, bool shortpre)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
int sifs, preamble, plcp_bits, sym_time;
|
||||
int bitrate, bits, symbols, symbol_bits;
|
||||
int dur;
|
||||
|
||||
/* Fallback */
|
||||
if (!ah->ah_bwmode) {
|
||||
__le16 raw_dur = ieee80211_generic_frame_duration(sc->hw,
|
||||
__le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
|
||||
NULL, len, rate);
|
||||
|
||||
/* subtract difference between long and short preamble */
|
||||
|
@ -205,7 +204,7 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
|
|||
*/
|
||||
void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_statistics *stats = &ah->ah_sc->stats;
|
||||
struct ath5k_statistics *stats = &ah->stats;
|
||||
|
||||
/* Read-And-Clear */
|
||||
stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
|
||||
|
@ -240,25 +239,24 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
|
|||
*/
|
||||
static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
struct ieee80211_rate *rate;
|
||||
unsigned int i;
|
||||
/* 802.11g covers both OFDM and CCK */
|
||||
u8 band = IEEE80211_BAND_2GHZ;
|
||||
|
||||
/* Write rate duration table */
|
||||
for (i = 0; i < sc->sbands[band].n_bitrates; i++) {
|
||||
for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
|
||||
u32 reg;
|
||||
u16 tx_time;
|
||||
|
||||
if (ah->ah_ack_bitrate_high)
|
||||
rate = &sc->sbands[band].bitrates[ack_rates_high[i]];
|
||||
rate = &ah->sbands[band].bitrates[ack_rates_high[i]];
|
||||
/* CCK -> 1Mb */
|
||||
else if (i < 4)
|
||||
rate = &sc->sbands[band].bitrates[0];
|
||||
rate = &ah->sbands[band].bitrates[0];
|
||||
/* OFDM -> 6Mb */
|
||||
else
|
||||
rate = &sc->sbands[band].bitrates[4];
|
||||
rate = &ah->sbands[band].bitrates[4];
|
||||
|
||||
/* Set ACK timeout */
|
||||
reg = AR5K_RATE_DUR(rate->hw_value);
|
||||
|
@ -586,7 +584,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
|
|||
/*
|
||||
* Set the additional timers by mode
|
||||
*/
|
||||
switch (ah->ah_sc->opmode) {
|
||||
switch (ah->opmode) {
|
||||
case NL80211_IFTYPE_MONITOR:
|
||||
case NL80211_IFTYPE_STATION:
|
||||
/* In STA mode timer1 is used as next wakeup
|
||||
|
@ -623,8 +621,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
|
|||
* Set the beacon register and enable all timers.
|
||||
*/
|
||||
/* When in AP or Mesh Point mode zero timer0 to start TSF */
|
||||
if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
|
||||
ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
|
||||
if (ah->opmode == NL80211_IFTYPE_AP ||
|
||||
ah->opmode == NL80211_IFTYPE_MESH_POINT)
|
||||
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
|
||||
|
||||
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
|
||||
|
@ -814,7 +812,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
|
|||
struct ath_common *common = ath5k_hw_common(ah);
|
||||
u32 pcu_reg, beacon_reg, low_id, high_id;
|
||||
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
|
||||
|
||||
/* Preserve rest settings */
|
||||
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
|
||||
|
@ -890,7 +888,7 @@ void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
|||
* XXX: rethink this after new mode changes to
|
||||
* mac80211 are integrated */
|
||||
if (ah->ah_version == AR5K_AR5212 &&
|
||||
ah->ah_sc->nvifs)
|
||||
ah->nvifs)
|
||||
ath5k_hw_write_rate_duration(ah);
|
||||
|
||||
/* Set RSSI/BRSSI thresholds
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include "ath5k.h"
|
||||
#include "reg.h"
|
||||
|
@ -561,7 +562,7 @@ static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
|
|||
}
|
||||
|
||||
done:
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"ret %d, gain step %u, current gain %u, target gain %u\n",
|
||||
ret, ah->ah_gain.g_step_idx, ah->ah_gain.g_current,
|
||||
ah->ah_gain.g_target);
|
||||
|
@ -773,7 +774,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
|
|||
ah->ah_rf_banks = kmalloc(sizeof(u32) * ah->ah_rf_banks_size,
|
||||
GFP_KERNEL);
|
||||
if (ah->ah_rf_banks == NULL) {
|
||||
ATH5K_ERR(ah->ah_sc, "out of memory\n");
|
||||
ATH5K_ERR(ah, "out of memory\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
@ -783,7 +784,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
|
|||
|
||||
for (i = 0; i < ah->ah_rf_banks_size; i++) {
|
||||
if (ini_rfb[i].rfb_bank >= AR5K_MAX_RF_BANKS) {
|
||||
ATH5K_ERR(ah->ah_sc, "invalid bank\n");
|
||||
ATH5K_ERR(ah, "invalid bank\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -1268,7 +1269,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
|
|||
* (CHANNEL_2GHZ, or CHANNEL_5GHZ) so we inform ath5k_channel_ok()
|
||||
* of the band by that */
|
||||
if (!ath5k_channel_ok(ah, channel->center_freq, channel->hw_value)) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"channel frequency (%u MHz) out of supported "
|
||||
"band range\n",
|
||||
channel->center_freq);
|
||||
|
@ -1356,7 +1357,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
|
|||
}
|
||||
}
|
||||
for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"cal %d:%d\n", i, sort[i]);
|
||||
}
|
||||
return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
|
||||
|
@ -1382,7 +1383,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
|
|||
|
||||
/* keep last value if calibration hasn't completed */
|
||||
if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"NF did not complete in calibration window\n");
|
||||
|
||||
return;
|
||||
|
@ -1395,7 +1396,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
|
|||
threshold = ee->ee_noise_floor_thr[ee_mode];
|
||||
|
||||
if (nf > threshold) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"noise floor failure detected; "
|
||||
"read %d, threshold %d\n",
|
||||
nf, threshold);
|
||||
|
@ -1432,7 +1433,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
|
|||
|
||||
ah->ah_noise_floor = nf;
|
||||
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"noise floor calibrated: %d\n", nf);
|
||||
}
|
||||
|
||||
|
@ -1520,7 +1521,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
|
|||
ath5k_hw_reg_write(ah, phy_sat, AR5K_PHY_ADCSAT);
|
||||
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n",
|
||||
ATH5K_ERR(ah, "calibration timeout (%uMHz)\n",
|
||||
channel->center_freq);
|
||||
return ret;
|
||||
}
|
||||
|
@ -1555,7 +1556,7 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
|
|||
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
|
||||
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
|
||||
q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"iq_corr:%x i_pwr:%x q_pwr:%x", iq_corr, i_pwr, q_pwr);
|
||||
if (i_pwr && q_pwr)
|
||||
break;
|
||||
|
@ -1581,7 +1582,7 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
|
|||
q_coff = (i_pwr / q_coffd) - 128;
|
||||
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
|
||||
|
||||
ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
|
||||
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
|
||||
"new I:%d Q:%d (i_coffd:%x q_coffd:%x)",
|
||||
i_coff, q_coff, i_coffd, q_coffd);
|
||||
|
||||
|
@ -1966,7 +1967,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
|
|||
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(channel);
|
||||
if (ee_mode < 0) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid channel: %d\n", channel->center_freq);
|
||||
return;
|
||||
}
|
||||

@@ -2794,12 +2795,8 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
 	 * Write TX power values
 	 */
 	for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
-		ath5k_hw_reg_write(ah,
-			((pdadc_out[4 * i + 0] & 0xff) << 0) |
-			((pdadc_out[4 * i + 1] & 0xff) << 8) |
-			((pdadc_out[4 * i + 2] & 0xff) << 16) |
-			((pdadc_out[4 * i + 3] & 0xff) << 24),
-			AR5K_PHY_PDADC_TXPOWER(i));
+		u32 val = get_unaligned_le32(&pdadc_out[4 * i]);
+		ath5k_hw_reg_write(ah, val, AR5K_PHY_PDADC_TXPOWER(i));
 	}
 }
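
get_unaligned_le32() above replaces four explicit shift-and-mask operations. A small, standalone C program showing the equivalent little-endian assembly of a 32-bit word, independent of buffer alignment and host endianness; the sample values are made up:

#include <stdint.h>
#include <stdio.h>

/* Portable equivalent of what get_unaligned_le32() does in the hunk above:
 * assemble a little-endian 32-bit word from four bytes. */
static uint32_t le32_from_bytes(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	uint8_t pdadc[8] = { 0x11, 0x22, 0x33, 0x44, 0xaa, 0xbb, 0xcc, 0xdd };

	/* Prints 44332211 and ddccbbaa, matching the old open-coded shifts. */
	printf("%08x %08x\n", le32_from_bytes(pdadc), le32_from_bytes(pdadc + 4));
	return 0;
}
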
|
||||
|
@ -3122,13 +3119,13 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
|
|||
int ret;
|
||||
|
||||
if (txpower > AR5K_TUNE_MAX_TXPOWER) {
|
||||
ATH5K_ERR(ah->ah_sc, "invalid tx power: %u\n", txpower);
|
||||
ATH5K_ERR(ah, "invalid tx power: %u\n", txpower);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ee_mode = ath5k_eeprom_mode_from_channel(channel);
|
||||
if (ee_mode < 0) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid channel: %d\n", channel->center_freq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -3229,7 +3226,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
|
|||
|
||||
int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
|
||||
{
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
|
||||
"changing txpower to %d\n", txpower);
|
||||
|
||||
return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
|
||||
|
@ -3440,7 +3437,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
|
|||
* during ath5k_phy_calibrate) */
|
||||
if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
|
||||
AR5K_PHY_AGCCTL_CAL, 0, false)) {
|
||||
ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
|
||||
ATH5K_ERR(ah, "gain calibration timeout (%uMHz)\n",
|
||||
channel->center_freq);
|
||||
}
|
||||
|
||||
|
|
|
@ -187,7 +187,7 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
|
|||
break;
|
||||
case AR5K_TX_QUEUE_XR_DATA:
|
||||
if (ah->ah_version != AR5K_AR5212)
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"XR data queues only supported in"
|
||||
" 5212!\n");
|
||||
queue = AR5K_TX_QUEUE_ID_XR_DATA;
|
||||
|
@ -510,7 +510,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
|
|||
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
|
||||
{
|
||||
struct ieee80211_channel *channel = ah->ah_current_channel;
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
struct ieee80211_rate *rate;
|
||||
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
|
||||
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
|
||||
|
@ -546,9 +545,9 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
|
|||
* Also we have different lowest rate for 802.11a
|
||||
*/
|
||||
if (channel->hw_value & CHANNEL_5GHZ)
|
||||
rate = &sc->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
|
||||
rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
|
||||
else
|
||||
rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
|
||||
rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
|
||||
|
||||
ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false);
|
||||
|
||||
|
@ -622,7 +621,7 @@ int ath5k_hw_init_queues(struct ath5k_hw *ah)
|
|||
for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
|
||||
ret = ath5k_hw_reset_tx_queue(ah, i);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"failed to reset TX queue #%d\n", i);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -390,7 +390,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
|
|||
u32 val = 0;
|
||||
|
||||
/* ah->ah_mac_srev is not available at this point yet */
|
||||
if (ah->ah_sc->devid >= AR5K_SREV_AR2315_R6) {
|
||||
if (ah->devid >= AR5K_SREV_AR2315_R6) {
|
||||
reg = (u32 __iomem *) AR5K_AR2315_RESET;
|
||||
if (mask & AR5K_RESET_CTL_PCU)
|
||||
val |= AR5K_AR2315_RESET_WMAC;
|
||||
|
@ -398,7 +398,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
|
|||
val |= AR5K_AR2315_RESET_BB_WARM;
|
||||
} else {
|
||||
reg = (u32 __iomem *) AR5K_AR5312_RESET;
|
||||
if (to_platform_device(ah->ah_sc->dev)->id == 0) {
|
||||
if (to_platform_device(ah->dev)->id == 0) {
|
||||
if (mask & AR5K_RESET_CTL_PCU)
|
||||
val |= AR5K_AR5312_RESET_WMAC0;
|
||||
if (mask & AR5K_RESET_CTL_BASEBAND)
|
||||
|
@ -530,7 +530,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
|
|||
*/
|
||||
int ath5k_hw_on_hold(struct ath5k_hw *ah)
|
||||
{
|
||||
struct pci_dev *pdev = ah->ah_sc->pdev;
|
||||
struct pci_dev *pdev = ah->pdev;
|
||||
u32 bus_flags;
|
||||
int ret;
|
||||
|
||||
|
@ -540,7 +540,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
|
|||
/* Make sure device is awake */
|
||||
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -565,14 +565,14 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
|
|||
}
|
||||
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to put device on warm reset\n");
|
||||
ATH5K_ERR(ah, "failed to put device on warm reset\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* ...wakeup again!*/
|
||||
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to put device on hold\n");
|
||||
ATH5K_ERR(ah, "failed to put device on hold\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -584,7 +584,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
|
|||
*/
|
||||
int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
||||
{
|
||||
struct pci_dev *pdev = ah->ah_sc->pdev;
|
||||
struct pci_dev *pdev = ah->pdev;
|
||||
u32 turbo, mode, clock, bus_flags;
|
||||
int ret;
|
||||
|
||||
|
@ -596,7 +596,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
|||
/* Wakeup the device */
|
||||
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
@ -626,14 +626,14 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
|||
}
|
||||
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to reset the MAC Chip\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* ...wakeup again!...*/
|
||||
ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to resume the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -646,7 +646,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
|||
ret = ath5k_hw_nic_reset(ah, 0);
|
||||
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
|
||||
ATH5K_ERR(ah, "failed to warm reset the MAC Chip\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
|
@ -687,7 +687,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
|||
else
|
||||
mode |= AR5K_PHY_MODE_MOD_DYN;
|
||||
} else {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid radio modulation mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -703,12 +703,12 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
|
|||
if (flags & CHANNEL_OFDM)
|
||||
mode |= AR5K_PHY_MODE_MOD_OFDM;
|
||||
else {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid radio modulation mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
ATH5K_ERR(ah->ah_sc, "invalid radio frequency mode\n");
|
||||
ATH5K_ERR(ah, "invalid radio frequency mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -1076,7 +1076,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
|||
/* RF Bus grant won't work if we have pending
|
||||
* frames */
|
||||
if (ret && fast) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
|
||||
"DMA didn't stop, falling back to normal reset\n");
|
||||
fast = 0;
|
||||
/* Non fatal, just continue with
|
||||
|
@ -1091,7 +1091,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
|||
case CHANNEL_G:
|
||||
|
||||
if (ah->ah_version <= AR5K_AR5211) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"G mode not available on 5210/5211");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1101,7 +1101,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
|||
case CHANNEL_B:
|
||||
|
||||
if (ah->ah_version < AR5K_AR5211) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"B mode not available on 5210");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1110,14 +1110,14 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
|||
break;
|
||||
case CHANNEL_XR:
|
||||
if (ah->ah_version == AR5K_AR5211) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"XR mode not available on 5211");
|
||||
return -EINVAL;
|
||||
}
|
||||
mode = AR5K_MODE_XR;
|
||||
break;
|
||||
default:
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"invalid channel: %d\n", channel->center_freq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1129,13 +1129,13 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
|||
if (fast) {
|
||||
ret = ath5k_hw_phy_init(ah, channel, mode, true);
|
||||
if (ret) {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
|
||||
"fast chan change failed, falling back to normal reset\n");
|
||||
/* Non fatal, can happen eg.
|
||||
* on mode change */
|
||||
ret = 0;
|
||||
} else {
|
||||
ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
|
||||
"fast chan change successful\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -1268,7 +1268,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
|
|||
*/
|
||||
ret = ath5k_hw_phy_init(ah, channel, mode, false);
|
||||
if (ret) {
|
||||
ATH5K_ERR(ah->ah_sc,
|
||||
ATH5K_ERR(ah,
|
||||
"failed to initialize PHY (%i) !\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -36,86 +36,81 @@
|
|||
#include "base.h"
|
||||
|
||||
|
||||
static inline void ath5k_rfkill_disable(struct ath5k_softc *sc)
|
||||
static inline void ath5k_rfkill_disable(struct ath5k_hw *ah)
|
||||
{
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
|
||||
sc->rf_kill.gpio, sc->rf_kill.polarity);
|
||||
ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, !sc->rf_kill.polarity);
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
|
||||
ah->rf_kill.gpio, ah->rf_kill.polarity);
|
||||
ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, !ah->rf_kill.polarity);
|
||||
}
|
||||
|
||||
|
||||
static inline void ath5k_rfkill_enable(struct ath5k_softc *sc)
|
||||
static inline void ath5k_rfkill_enable(struct ath5k_hw *ah)
|
||||
{
|
||||
ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
|
||||
sc->rf_kill.gpio, sc->rf_kill.polarity);
|
||||
ath5k_hw_set_gpio_output(sc->ah, sc->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio(sc->ah, sc->rf_kill.gpio, sc->rf_kill.polarity);
|
||||
ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
|
||||
ah->rf_kill.gpio, ah->rf_kill.polarity);
|
||||
ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, ah->rf_kill.polarity);
|
||||
}
|
||||
|
||||
static inline void ath5k_rfkill_set_intr(struct ath5k_softc *sc, bool enable)
|
||||
static inline void ath5k_rfkill_set_intr(struct ath5k_hw *ah, bool enable)
|
||||
{
|
||||
struct ath5k_hw *ah = sc->ah;
|
||||
u32 curval;
|
||||
|
||||
ath5k_hw_set_gpio_input(ah, sc->rf_kill.gpio);
|
||||
curval = ath5k_hw_get_gpio(ah, sc->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio_intr(ah, sc->rf_kill.gpio, enable ?
|
||||
ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);
|
||||
curval = ath5k_hw_get_gpio(ah, ah->rf_kill.gpio);
|
||||
ath5k_hw_set_gpio_intr(ah, ah->rf_kill.gpio, enable ?
|
||||
!!curval : !curval);
|
||||
}
|
||||
|
||||
static bool
|
||||
ath5k_is_rfkill_set(struct ath5k_softc *sc)
|
||||
ath5k_is_rfkill_set(struct ath5k_hw *ah)
|
||||
{
|
||||
/* configuring GPIO for input for some reason disables rfkill */
|
||||
/*ath5k_hw_set_gpio_input(sc->ah, sc->rf_kill.gpio);*/
|
||||
return ath5k_hw_get_gpio(sc->ah, sc->rf_kill.gpio) ==
|
||||
sc->rf_kill.polarity;
|
||||
/*ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);*/
|
||||
return ath5k_hw_get_gpio(ah, ah->rf_kill.gpio) ==
|
||||
ah->rf_kill.polarity;
|
||||
}
|
||||
|
||||
static void
|
||||
ath5k_tasklet_rfkill_toggle(unsigned long data)
|
||||
{
|
||||
struct ath5k_softc *sc = (void *)data;
|
||||
struct ath5k_hw *ah = (void *)data;
|
||||
bool blocked;
|
||||
|
||||
blocked = ath5k_is_rfkill_set(sc);
|
||||
wiphy_rfkill_set_hw_state(sc->hw->wiphy, blocked);
|
||||
blocked = ath5k_is_rfkill_set(ah);
|
||||
wiphy_rfkill_set_hw_state(ah->hw->wiphy, blocked);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
ath5k_rfkill_hw_start(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
|
||||
/* read rfkill GPIO configuration from EEPROM header */
|
||||
sc->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
|
||||
sc->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
|
||||
ah->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
|
||||
ah->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
|
||||
|
||||
tasklet_init(&sc->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
|
||||
(unsigned long)sc);
|
||||
tasklet_init(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
|
||||
(unsigned long)ah);
|
||||
|
||||
ath5k_rfkill_disable(sc);
|
||||
ath5k_rfkill_disable(ah);
|
||||
|
||||
/* enable interrupt for rfkill switch */
|
||||
if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
|
||||
ath5k_rfkill_set_intr(sc, true);
|
||||
ath5k_rfkill_set_intr(ah, true);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
|
||||
{
|
||||
struct ath5k_softc *sc = ah->ah_sc;
|
||||
|
||||
/* disable interrupt for rfkill switch */
|
||||
if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
|
||||
ath5k_rfkill_set_intr(sc, false);
|
||||
ath5k_rfkill_set_intr(ah, false);
|
||||
|
||||
tasklet_kill(&sc->rf_kill.toggleq);
|
||||
tasklet_kill(&ah->rf_kill.toggleq);
|
||||
|
||||
/* enable RFKILL when stopping HW so Wifi LED is turned off */
|
||||
ath5k_rfkill_enable(sc);
|
||||
ath5k_rfkill_enable(ah);
|
||||
}
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
|
|||
char *buf) \
|
||||
{ \
|
||||
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
|
||||
struct ath5k_softc *sc = hw->priv; \
|
||||
struct ath5k_hw *ah = hw->priv; \
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
|
||||
} \
|
||||
\
|
||||
|
@ -20,13 +20,13 @@ static ssize_t ath5k_attr_store_##name(struct device *dev, \
|
|||
const char *buf, size_t count) \
|
||||
{ \
|
||||
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
|
||||
struct ath5k_softc *sc = hw->priv; \
|
||||
struct ath5k_hw *ah = hw->priv; \
|
||||
int val, ret; \
|
||||
\
|
||||
ret = kstrtoint(buf, 10, &val); \
|
||||
if (ret < 0) \
|
||||
return ret; \
|
||||
set(sc->ah, val); \
|
||||
set(ah, val); \
|
||||
return count; \
|
||||
} \
|
||||
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, \
|
||||
|
@ -38,25 +38,25 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
|
|||
char *buf) \
|
||||
{ \
|
||||
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
|
||||
struct ath5k_softc *sc = hw->priv; \
|
||||
struct ath5k_hw *ah = hw->priv; \
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
|
||||
} \
|
||||
static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
|
||||
|
||||
/*** ANI ***/
|
||||
|
||||
SIMPLE_SHOW_STORE(ani_mode, sc->ani_state.ani_mode, ath5k_ani_init);
|
||||
SIMPLE_SHOW_STORE(noise_immunity_level, sc->ani_state.noise_imm_level,
|
||||
SIMPLE_SHOW_STORE(ani_mode, ah->ani_state.ani_mode, ath5k_ani_init);
|
||||
SIMPLE_SHOW_STORE(noise_immunity_level, ah->ani_state.noise_imm_level,
|
||||
ath5k_ani_set_noise_immunity_level);
|
||||
SIMPLE_SHOW_STORE(spur_level, sc->ani_state.spur_level,
|
||||
SIMPLE_SHOW_STORE(spur_level, ah->ani_state.spur_level,
|
||||
ath5k_ani_set_spur_immunity_level);
|
||||
SIMPLE_SHOW_STORE(firstep_level, sc->ani_state.firstep_level,
|
||||
SIMPLE_SHOW_STORE(firstep_level, ah->ani_state.firstep_level,
|
||||
ath5k_ani_set_firstep_level);
|
||||
SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, sc->ani_state.ofdm_weak_sig,
|
||||
SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, ah->ani_state.ofdm_weak_sig,
|
||||
ath5k_ani_set_ofdm_weak_signal_detection);
|
||||
SIMPLE_SHOW_STORE(cck_weak_signal_detection, sc->ani_state.cck_weak_sig,
|
||||
SIMPLE_SHOW_STORE(cck_weak_signal_detection, ah->ani_state.cck_weak_sig,
|
||||
ath5k_ani_set_cck_weak_signal_detection);
|
||||
SIMPLE_SHOW(spur_level_max, sc->ani_state.max_spur_level);
|
||||
SIMPLE_SHOW(spur_level_max, ah->ani_state.max_spur_level);
|
||||
|
||||
static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
|
@ -98,14 +98,14 @@ static struct attribute_group ath5k_attribute_group_ani = {
|
|||
/*** register / unregister ***/
|
||||
|
||||
int
|
||||
ath5k_sysfs_register(struct ath5k_softc *sc)
|
||||
ath5k_sysfs_register(struct ath5k_hw *ah)
|
||||
{
|
||||
struct device *dev = sc->dev;
|
||||
struct device *dev = ah->dev;
|
||||
int err;
|
||||
|
||||
err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
|
||||
if (err) {
|
||||
ATH5K_ERR(sc, "failed to create sysfs group\n");
|
||||
ATH5K_ERR(ah, "failed to create sysfs group\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -113,9 +113,9 @@ ath5k_sysfs_register(struct ath5k_softc *sc)
|
|||
}
|
||||
|
||||
void
|
||||
ath5k_sysfs_unregister(struct ath5k_softc *sc)
|
||||
ath5k_sysfs_unregister(struct ath5k_hw *ah)
|
||||
{
|
||||
struct device *dev = sc->dev;
|
||||
struct device *dev = ah->dev;
|
||||
|
||||
sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
|
||||
}
|
||||
|
|
|
@ -16,10 +16,10 @@ struct sk_buff;
|
|||
#define TRACE_SYSTEM ath5k
|
||||
|
||||
TRACE_EVENT(ath5k_rx,
|
||||
TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb),
|
||||
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb),
|
||||
TP_ARGS(priv, skb),
|
||||
TP_STRUCT__entry(
|
||||
__field(struct ath5k_softc *, priv)
|
||||
__field(struct ath5k_hw *, priv)
|
||||
__field(unsigned long, skbaddr)
|
||||
__dynamic_array(u8, frame, skb->len)
|
||||
),
|
||||
|
@ -34,13 +34,13 @@ TRACE_EVENT(ath5k_rx,
|
|||
);
|
||||
|
||||
TRACE_EVENT(ath5k_tx,
|
||||
TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
|
||||
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
|
||||
struct ath5k_txq *q),
|
||||
|
||||
TP_ARGS(priv, skb, q),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(struct ath5k_softc *, priv)
|
||||
__field(struct ath5k_hw *, priv)
|
||||
__field(unsigned long, skbaddr)
|
||||
__field(u8, qnum)
|
||||
__dynamic_array(u8, frame, skb->len)
|
||||
|
@ -60,13 +60,13 @@ TRACE_EVENT(ath5k_tx,
|
|||
);
|
||||
|
||||
TRACE_EVENT(ath5k_tx_complete,
|
||||
TP_PROTO(struct ath5k_softc *priv, struct sk_buff *skb,
|
||||
TP_PROTO(struct ath5k_hw *priv, struct sk_buff *skb,
|
||||
struct ath5k_txq *q, struct ath5k_tx_status *ts),
|
||||
|
||||
TP_ARGS(priv, skb, q, ts),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field(struct ath5k_softc *, priv)
|
||||
__field(struct ath5k_hw *, priv)
|
||||
__field(unsigned long, skbaddr)
|
||||
__field(u8, qnum)
|
||||
__field(u8, ts_status)
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include "hw.h"
|
||||
#include "ar9003_phy.h"
|
||||
#include "ar9003_eeprom.h"
|
||||
|
@ -3006,11 +3007,11 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
|
|||
|
||||
switch (param) {
|
||||
case EEP_MAC_LSW:
|
||||
return eep->macAddr[0] << 8 | eep->macAddr[1];
|
||||
return get_unaligned_be16(eep->macAddr);
|
||||
case EEP_MAC_MID:
|
||||
return eep->macAddr[2] << 8 | eep->macAddr[3];
|
||||
return get_unaligned_be16(eep->macAddr + 2);
|
||||
case EEP_MAC_MSW:
|
||||
return eep->macAddr[4] << 8 | eep->macAddr[5];
|
||||
return get_unaligned_be16(eep->macAddr + 4);
|
||||
case EEP_REG_0:
|
||||
return le16_to_cpu(pBase->regDmn[0]);
|
||||
case EEP_REG_1:
|
||||
|
@ -3038,7 +3039,7 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
|
|||
case EEP_CHAIN_MASK_REDUCE:
|
||||
return (pBase->miscConfiguration >> 0x3) & 0x1;
|
||||
case EEP_ANT_DIV_CTL1:
|
||||
return le32_to_cpu(eep->base_ext1.ant_div_control);
|
||||
return eep->base_ext1.ant_div_control;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
|
@ -3380,8 +3381,7 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
|
|||
osize = length;
|
||||
read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
|
||||
checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
|
||||
mchecksum = word[COMP_HDR_LEN + osize] |
|
||||
(word[COMP_HDR_LEN + osize + 1] << 8);
|
||||
mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
|
||||
ath_dbg(common, ATH_DBG_EEPROM,
|
||||
"checksum %x %x\n", checksum, mchecksum);
|
||||
if (checksum == mchecksum) {
|
||||

@@ -50,7 +50,7 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
 		.bt_first_slot_time = 5,
 		.bt_hold_rx_clear = true,
 	};
-	u32 i;
+	u32 i, idx;
 	bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;

 	if (AR_SREV_9300_20_OR_LATER(ah))
@@ -73,8 +73,10 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
 		SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
 		AR_BT_DISABLE_BT_ANT;

-	for (i = 0; i < 32; i++)
-		ah->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i;
+	for (i = 0; i < 32; i++) {
+		idx = (debruijn32 << i) >> 27;
+		ah->hw_gen_timers.gen_timer_index[idx] = i;
+	}
 }
 EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
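
The rewritten loop above fills a De Bruijn lookup table: because every 5-bit window of the constant is distinct, (debruijn32 << i) >> 27 is a perfect hash of bit position i, which later lets the driver map an isolated timer bit back to its index without looping. A standalone demonstration, assuming the constant is the usual 0x077CB531 (the exact value lives in the ath9k headers):

#include <stdint.h>
#include <stdio.h>

#define DEBRUIJN32 0x077CB531u	/* assumed: classic 32-bit De Bruijn constant */

int main(void)
{
	uint8_t index[32];
	uint32_t i;

	/* Same construction as the driver loop: hash each shift to a slot. */
	for (i = 0; i < 32; i++)
		index[(DEBRUIJN32 << i) >> 27] = (uint8_t)i;

	/* Recover the position of an isolated bit with one multiply,
	 * since (1u << i) * C == C << i modulo 2^32. */
	for (i = 0; i < 32; i++) {
		uint32_t bit = 1u << i;
		if (index[(uint32_t)(bit * DEBRUIJN32) >> 27] != i)
			return 1;
	}
	printf("all 32 single-bit values map back to their position\n");
	return 0;
}
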
||||
|
|
|
@ -749,7 +749,6 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
|
|||
char *buf;
|
||||
unsigned int len = 0, size = 8000;
|
||||
ssize_t retval = 0;
|
||||
const char *tmp;
|
||||
unsigned int reg;
|
||||
struct ath9k_vif_iter_data iter_data;
|
||||
|
||||
|
@ -759,31 +758,14 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf,
|
|||
if (buf == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
switch (sc->sc_ah->opmode) {
|
||||
case NL80211_IFTYPE_ADHOC:
|
||||
tmp = "ADHOC";
|
||||
break;
|
||||
case NL80211_IFTYPE_MESH_POINT:
|
||||
tmp = "MESH";
|
||||
break;
|
||||
case NL80211_IFTYPE_AP:
|
||||
tmp = "AP";
|
||||
break;
|
||||
case NL80211_IFTYPE_STATION:
|
||||
tmp = "STATION";
|
||||
break;
|
||||
default:
|
||||
tmp = "???";
|
||||
break;
|
||||
}
|
||||
|
||||
ath9k_ps_wakeup(sc);
|
||||
len += snprintf(buf + len, size - len,
|
||||
"curbssid: %pM\n"
|
||||
"OP-Mode: %s(%i)\n"
|
||||
"Beacon-Timer-Register: 0x%x\n",
|
||||
common->curbssid,
|
||||
tmp, (int)(sc->sc_ah->opmode),
|
||||
ath_opmode_to_string(sc->sc_ah->opmode),
|
||||
(int)(sc->sc_ah->opmode),
|
||||
REG_READ(ah, AR_BEACON_PERIOD));
|
||||
|
||||
reg = REG_READ(ah, AR_TIMER_MODE);
|
||||

@@ -14,6 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

+#include <asm/unaligned.h>
 #include "hw.h"
 #include "ar9002_phy.h"

@@ -203,11 +204,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
 	case EEP_NFTHRESH_2:
 		return pModal->noiseFloorThreshCh[0];
 	case EEP_MAC_LSW:
-		return pBase->macAddr[0] << 8 | pBase->macAddr[1];
+		return get_unaligned_be16(pBase->macAddr);
 	case EEP_MAC_MID:
-		return pBase->macAddr[2] << 8 | pBase->macAddr[3];
+		return get_unaligned_be16(pBase->macAddr + 2);
 	case EEP_MAC_MSW:
-		return pBase->macAddr[4] << 8 | pBase->macAddr[5];
+		return get_unaligned_be16(pBase->macAddr + 4);
 	case EEP_REG_0:
 		return pBase->regDmn[0];
 	case EEP_REG_1:
|
||||
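
Each EEP_MAC_* case above now returns one big-endian 16-bit word of the MAC address via get_unaligned_be16(). A standalone sketch of the same packing and of how a caller might split the three words back into octets; the helper name and sample address are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Userspace equivalent of get_unaligned_be16(): big-endian 16-bit read. */
static uint16_t be16_from_bytes(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
	uint16_t lsw = be16_from_bytes(mac);	 /* like EEP_MAC_LSW */
	uint16_t mid = be16_from_bytes(mac + 2); /* like EEP_MAC_MID */
	uint16_t msw = be16_from_bytes(mac + 4); /* like EEP_MAC_MSW */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       lsw >> 8, lsw & 0xff, mid >> 8, mid & 0xff,
	       msw >> 8, msw & 0xff);
	return 0;
}
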
|
@ -331,10 +332,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
|
|||
|
||||
regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
|
||||
for (j = 0; j < 32; j++) {
|
||||
reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
|
||||
((pdadcValues[4 * j + 1] & 0xFF) << 8) |
|
||||
((pdadcValues[4 * j + 2] & 0xFF) << 16)|
|
||||
((pdadcValues[4 * j + 3] & 0xFF) << 24);
|
||||
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
|
||||
REG_WRITE(ah, regOffset, reg32);
|
||||
|
||||
ath_dbg(common, ATH_DBG_EEPROM,
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include "hw.h"
|
||||
#include "ar9002_phy.h"
|
||||
|
||||
|
@ -195,11 +196,11 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
|
|||
case EEP_NFTHRESH_2:
|
||||
return pModal->noiseFloorThreshCh[0];
|
||||
case EEP_MAC_LSW:
|
||||
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
|
||||
return get_unaligned_be16(pBase->macAddr);
|
||||
case EEP_MAC_MID:
|
||||
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
|
||||
return get_unaligned_be16(pBase->macAddr + 2);
|
||||
case EEP_MAC_MSW:
|
||||
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
|
||||
return get_unaligned_be16(pBase->macAddr + 4);
|
||||
case EEP_REG_0:
|
||||
return pBase->regDmn[0];
|
||||
case EEP_REG_1:
|
||||
|
@ -434,10 +435,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
|
|||
(672 << 2) + regChainOffset;
|
||||
|
||||
for (j = 0; j < 32; j++) {
|
||||
reg32 = ((pdadcValues[4*j + 0] & 0xFF) << 0)
|
||||
| ((pdadcValues[4*j + 1] & 0xFF) << 8)
|
||||
| ((pdadcValues[4*j + 2] & 0xFF) << 16)
|
||||
| ((pdadcValues[4*j + 3] & 0xFF) << 24);
|
||||
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
|
||||
|
||||
REG_WRITE(ah, regOffset, reg32);
|
||||
regOffset += 4;
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <asm/unaligned.h>
|
||||
#include "hw.h"
|
||||
#include "ar9002_phy.h"
|
||||
|
||||
|
@ -276,11 +277,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
|
|||
case EEP_NFTHRESH_2:
|
||||
return pModal[1].noiseFloorThreshCh[0];
|
||||
case EEP_MAC_LSW:
|
||||
return pBase->macAddr[0] << 8 | pBase->macAddr[1];
|
||||
return get_unaligned_be16(pBase->macAddr);
|
||||
case EEP_MAC_MID:
|
||||
return pBase->macAddr[2] << 8 | pBase->macAddr[3];
|
||||
return get_unaligned_be16(pBase->macAddr + 2);
|
||||
case EEP_MAC_MSW:
|
||||
return pBase->macAddr[4] << 8 | pBase->macAddr[5];
|
||||
return get_unaligned_be16(pBase->macAddr + 4);
|
||||
case EEP_REG_0:
|
||||
return pBase->regDmn[0];
|
||||
case EEP_REG_1:
|
||||
|
@ -831,10 +832,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
|
|||
|
||||
regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
|
||||
for (j = 0; j < 32; j++) {
|
||||
reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
|
||||
((pdadcValues[4 * j + 1] & 0xFF) << 8) |
|
||||
((pdadcValues[4 * j + 2] & 0xFF) << 16)|
|
||||
((pdadcValues[4 * j + 3] & 0xFF) << 24);
|
||||
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
|
||||
REG_WRITE(ah, regOffset, reg32);
|
||||
|
||||
ath_dbg(common, ATH_DBG_EEPROM,
|
||||

@@ -14,6 +14,7 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */

+#include <asm/unaligned.h>
 #include "htc.h"

 /* identify firmware images */
@@ -129,12 +130,14 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
 static void hif_usb_mgmt_cb(struct urb *urb)
 {
 	struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
-	struct hif_device_usb *hif_dev = cmd->hif_dev;
+	struct hif_device_usb *hif_dev;
 	bool txok = true;

+	if (!cmd || !cmd->skb || !cmd->hif_dev)
+		return;
+
+	hif_dev = cmd->hif_dev;
+
 	switch (urb->status) {
 	case 0:
 		break;
@@ -557,8 +560,8 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,

 	ptr = (u8 *) skb->data;

-	pkt_len = ptr[index] + (ptr[index+1] << 8);
-	pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
+	pkt_len = get_unaligned_le16(ptr + index);
+	pkt_tag = get_unaligned_le16(ptr + index + 2);

 	if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
 		RX_STAT_INC(skb_dropped);
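
The hif_usb_mgmt_cb change above guards against the URB completing with a stale or half-torn-down context. A hedged fragment of the same defensive pattern; the type names are illustrative, and only urb->context plus the early return mirror the real code:

/* Sketch: validate an async completion's context before dereferencing it. */
struct example_ctx {
	struct example_dev *dev;
	struct sk_buff *skb;
};

static void example_complete(struct urb *urb)
{
	struct example_ctx *ctx = urb->context;

	if (!ctx || !ctx->skb || !ctx->dev)	/* bail out early, never deref */
		return;

	/* ... safe to use ctx->dev and ctx->skb from here on ... */
}
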
|
|
@@ -623,11 +623,8 @@ static ssize_t read_file_base_eeprom(struct file *file, char __user *user_buf,
 			pBase9287->openLoopPwrCntl);
 	}

-	len += snprintf(buf + len, size - len,
-			"%20s : %02X:%02X:%02X:%02X:%02X:%02X\n",
-			"MacAddress",
-			pBase->macAddr[0], pBase->macAddr[1], pBase->macAddr[2],
-			pBase->macAddr[3], pBase->macAddr[4], pBase->macAddr[5]);
+	len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress",
+			pBase->macAddr);
 	if (len > size)
 		len = size;
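
The debugfs hunk keeps the usual len += snprintf(...) accumulation followed by a clamp, because snprintf() reports the length it wanted to write, which can exceed the remaining buffer. A standalone demonstration of why the clamp matters:

#include <stdio.h>

int main(void)
{
	char buf[16];
	size_t size = sizeof(buf), len = 0;

	/* Wants ~41 bytes but only 16 are available; the return value
	 * reflects the wanted length, not what was stored. */
	len += snprintf(buf + len, size - len, "%20s : %s\n", "MacAddress",
			"00:1b:21:aa:bb:cc");
	if (len > size)		/* clamp, exactly as the driver code does */
		len = size;

	printf("stored %zu of %zu bytes: \"%.*s\"\n", len, size,
	       (int)len, buf);
	return 0;
}
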
|
@@ -1997,12 +1997,22 @@ EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
 /* HW Capabilities */
 /*******************/

+static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
+{
+	eeprom_chainmask &= chip_chainmask;
+	if (eeprom_chainmask)
+		return eeprom_chainmask;
+	else
+		return chip_chainmask;
+}
+
 int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 {
 	struct ath9k_hw_capabilities *pCap = &ah->caps;
 	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+	unsigned int chip_chainmask;

 	u16 eeval;
 	u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
@@ -2039,6 +2049,15 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 	if (eeval & AR5416_OPFLAGS_11G)
 		pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;

+	if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
+		chip_chainmask = 1;
+	else if (!AR_SREV_9280_20_OR_LATER(ah))
+		chip_chainmask = 7;
+	else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
+		chip_chainmask = 3;
+	else
+		chip_chainmask = 7;
+
 	pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
 	/*
 	 * For AR9271 we will temporarilly uses the rx chainmax as read from
@@ -2055,6 +2074,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
 	/* Use rx_chainmask from EEPROM. */
 	pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);

+	pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
+	pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);
+
 	ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;

 	/* enable key search for every frame in an aggregate */
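
fixup_chainmask(), added above, trusts the EEPROM chainmask only where it overlaps the chip's real capabilities and falls back to the chip mask otherwise. The same logic as a standalone program with two sample inputs:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the helper above: clamp the EEPROM mask to the chip mask,
 * fall back to the chip mask if the intersection is empty. */
static uint8_t fixup_chainmask(uint8_t chip_chainmask, uint8_t eeprom_chainmask)
{
	eeprom_chainmask &= chip_chainmask;
	return eeprom_chainmask ? eeprom_chainmask : chip_chainmask;
}

int main(void)
{
	/* 1-chain chip with a bogus 3-chain EEPROM value -> clamp to 0x1. */
	printf("0x%x\n", fixup_chainmask(0x1, 0x7));
	/* Empty intersection -> fall back to the chip's own mask (0x3). */
	printf("0x%x\n", fixup_chainmask(0x3, 0x4));
	return 0;
}
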
|
@@ -197,6 +197,19 @@ static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
 	return val;
 }

+static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
+				    u32 set, u32 clr)
+{
+	u32 val;
+
+	val = ioread32(sc->mem + reg_offset);
+	val &= ~clr;
+	val |= set;
+	iowrite32(val, sc->mem + reg_offset);
+
+	return val;
+}
+
 static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
 {
 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
@@ -205,16 +218,12 @@ static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
 	unsigned long uninitialized_var(flags);
 	u32 val;

-	if (ah->config.serialize_regmode == SER_REG_MODE_ON)
+	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
 		spin_lock_irqsave(&sc->sc_serial_rw, flags);
-
-	val = ioread32(sc->mem + reg_offset);
-	val &= ~clr;
-	val |= set;
-	iowrite32(val, sc->mem + reg_offset);
-
-	if (ah->config.serialize_regmode == SER_REG_MODE_ON)
+		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
 		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+	} else
+		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

 	return val;
 }
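
The init.c refactor above pulls the register read-modify-write into one helper so the serialized (locked) and plain paths share it. The core operation is simply new = (old & ~clr) | set; a plain-memory stand-in:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the helper above, operating on ordinary memory instead of
 * ioread32()/iowrite32() on a register window. */
static uint32_t reg_rmw(uint32_t *reg, uint32_t set, uint32_t clr)
{
	uint32_t val = *reg;	/* ioread32() in the driver */

	val &= ~clr;
	val |= set;
	*reg = val;		/* iowrite32() in the driver */
	return val;
}

int main(void)
{
	uint32_t reg = 0xf0f0;

	/* Clear the low byte, set bit 0: 0xf0f0 -> 0xf001. */
	printf("0x%04x\n", reg_rmw(&reg, 0x0001, 0x00ff));
	return 0;
}
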
|
@ -815,16 +815,19 @@ static bool ath9k_rx_accept(struct ath_common *common,
|
|||
struct ath_rx_status *rx_stats,
|
||||
bool *decrypt_error)
|
||||
{
|
||||
#define is_mc_or_valid_tkip_keyix ((is_mc || \
|
||||
(rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \
|
||||
test_bit(rx_stats->rs_keyix, common->tkip_keymap))))
|
||||
|
||||
bool is_mc, is_valid_tkip, strip_mic, mic_error;
|
||||
struct ath_hw *ah = common->ah;
|
||||
__le16 fc;
|
||||
u8 rx_status_len = ah->caps.rx_status_len;
|
||||
|
||||
fc = hdr->frame_control;
|
||||
|
||||
is_mc = !!is_multicast_ether_addr(hdr->addr1);
|
||||
is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
|
||||
test_bit(rx_stats->rs_keyix, common->tkip_keymap);
|
||||
strip_mic = is_valid_tkip && !(rx_stats->rs_status &
|
||||
(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC));
|
||||
|
||||
if (!rx_stats->rs_datalen)
|
||||
return false;
|
||||
/*
|
||||
|
@ -839,6 +842,11 @@ static bool ath9k_rx_accept(struct ath_common *common,
|
|||
if (rx_stats->rs_more)
|
||||
return true;
|
||||
|
||||
mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
|
||||
!ieee80211_has_morefrags(fc) &&
|
||||
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
|
||||
(rx_stats->rs_status & ATH9K_RXERR_MIC);
|
||||
|
||||
/*
|
||||
* The rx_stats->rs_status will not be set until the end of the
|
||||
* chained descriptors so it can be ignored if rs_more is set. The
|
||||
|
@ -846,30 +854,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
|
|||
* descriptors.
|
||||
*/
|
||||
if (rx_stats->rs_status != 0) {
|
||||
if (rx_stats->rs_status & ATH9K_RXERR_CRC)
|
||||
if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
|
||||
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
|
||||
mic_error = false;
|
||||
}
|
||||
if (rx_stats->rs_status & ATH9K_RXERR_PHY)
|
||||
return false;
|
||||
|
||||
if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) {
|
||||
*decrypt_error = true;
|
||||
} else if (rx_stats->rs_status & ATH9K_RXERR_MIC) {
|
||||
bool is_mc;
|
||||
/*
|
||||
* The MIC error bit is only valid if the frame
|
||||
* is not a control frame or fragment, and it was
|
||||
* decrypted using a valid TKIP key.
|
||||
*/
|
||||
is_mc = !!is_multicast_ether_addr(hdr->addr1);
|
||||
|
||||
if (!ieee80211_is_ctl(fc) &&
|
||||
!ieee80211_has_morefrags(fc) &&
|
||||
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
|
||||
is_mc_or_valid_tkip_keyix)
|
||||
rxs->flag |= RX_FLAG_MMIC_ERROR;
|
||||
else
|
||||
rx_stats->rs_status &= ~ATH9K_RXERR_MIC;
|
||||
mic_error = false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reject error frames with the exception of
|
||||
* decryption and MIC failures. For monitor mode,
|
||||
|
@ -887,6 +883,18 @@ static bool ath9k_rx_accept(struct ath_common *common,
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* For unicast frames the MIC error bit can have false positives,
|
||||
* so all MIC error reports need to be validated in software.
|
||||
* False negatives are not common, so skip software verification
|
||||
* if the hardware considers the MIC valid.
|
||||
*/
|
||||
if (strip_mic)
|
||||
rxs->flag |= RX_FLAG_MMIC_STRIPPED;
|
||||
else if (is_mc && mic_error)
|
||||
rxs->flag |= RX_FLAG_MMIC_ERROR;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1939,6 +1947,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
|
|||
sc->rx.rxotherant = 0;
|
||||
}
|
||||
|
||||
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
|
||||
skb_trim(skb, skb->len - 8);
|
||||
|
||||
spin_lock_irqsave(&sc->sc_pm_lock, flags);
|
||||
|
||||
if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
|
||||
|
|
|
@ -1873,29 +1873,6 @@ enum {
|
|||
#define AR_RATE_DURATION(_n) (AR_RATE_DURATION_0 + ((_n)<<2))
|
||||
|
||||
|
||||
#define AR_KEYTABLE_0 0x8800
|
||||
#define AR_KEYTABLE(_n) (AR_KEYTABLE_0 + ((_n)*32))
|
||||
#define AR_KEY_CACHE_SIZE 128
|
||||
#define AR_RSVD_KEYTABLE_ENTRIES 4
|
||||
#define AR_KEY_TYPE 0x00000007
|
||||
#define AR_KEYTABLE_TYPE_40 0x00000000
|
||||
#define AR_KEYTABLE_TYPE_104 0x00000001
|
||||
#define AR_KEYTABLE_TYPE_128 0x00000003
|
||||
#define AR_KEYTABLE_TYPE_TKIP 0x00000004
|
||||
#define AR_KEYTABLE_TYPE_AES 0x00000005
|
||||
#define AR_KEYTABLE_TYPE_CCM 0x00000006
|
||||
#define AR_KEYTABLE_TYPE_CLR 0x00000007
|
||||
#define AR_KEYTABLE_ANT 0x00000008
|
||||
#define AR_KEYTABLE_VALID 0x00008000
|
||||
#define AR_KEYTABLE_KEY0(_n) (AR_KEYTABLE(_n) + 0)
|
||||
#define AR_KEYTABLE_KEY1(_n) (AR_KEYTABLE(_n) + 4)
|
||||
#define AR_KEYTABLE_KEY2(_n) (AR_KEYTABLE(_n) + 8)
|
||||
#define AR_KEYTABLE_KEY3(_n) (AR_KEYTABLE(_n) + 12)
|
||||
#define AR_KEYTABLE_KEY4(_n) (AR_KEYTABLE(_n) + 16)
|
||||
#define AR_KEYTABLE_TYPE(_n) (AR_KEYTABLE(_n) + 20)
|
||||
#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
|
||||
#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
|
||||
|
||||
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
|
||||
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
|
||||
|
||||
|

@@ -1148,6 +1148,8 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)

 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
 			       struct list_head *list, bool retry_tx)
+	__releases(txq->axq_lock)
+	__acquires(txq->axq_lock)
 {
 	struct ath_buf *bf, *lastbf;
 	struct list_head bf_head;
@@ -2036,6 +2038,8 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
 				  struct ath_tx_status *ts, struct ath_buf *bf,
 				  struct list_head *bf_head)
+	__releases(txq->axq_lock)
+	__acquires(txq->axq_lock)
 {
 	int txok;
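
The two annotations added above are sparse hints: as I read them, both functions are entered with the queue lock held, drop it internally, and re-take it before returning, so the lock state is balanced across the call. A hedged fragment showing the shape such a function has; the locking body is illustrative, not the driver's actual code:

/* Sketch: a function that temporarily drops the caller's lock, annotated
 * so sparse does not warn about context imbalance. */
static void example_drain(struct ath_txq *txq)
	__releases(txq->axq_lock)
	__acquires(txq->axq_lock)
{
	spin_unlock_bh(&txq->axq_lock);	/* work without the lock held */
	/* ... complete buffers, call back into mac80211, etc. ... */
	spin_lock_bh(&txq->axq_lock);	/* restore the caller's locking */
}
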
@@ -177,7 +177,7 @@ struct carl9170_tx_queue_stats {

 struct carl9170_vif {
 	unsigned int id;
-	struct ieee80211_vif *vif;
+	struct ieee80211_vif __rcu *vif;
 };

 struct carl9170_vif_info {
@@ -311,7 +311,7 @@ struct ar9170 {
 	spinlock_t beacon_lock;
 	unsigned int global_pretbtt;
 	unsigned int global_beacon_int;
-	struct carl9170_vif_info *beacon_iter;
+	struct carl9170_vif_info __rcu *beacon_iter;
 	unsigned int beacon_enabled;

 	/* cryptographic engine */
@@ -389,7 +389,7 @@ struct ar9170 {
 	/* tx ampdu */
 	struct work_struct ampdu_work;
 	spinlock_t tx_ampdu_list_lock;
-	struct carl9170_sta_tid *tx_ampdu_iter;
+	struct carl9170_sta_tid __rcu *tx_ampdu_iter;
 	struct list_head tx_ampdu_list;
 	atomic_t tx_ampdu_upload;
 	atomic_t tx_ampdu_scheduler;
@@ -456,7 +456,7 @@ struct carl9170_sta_info {
 	bool sleeping;
 	atomic_t pending_frames;
 	unsigned int ampdu_max_len;
-	struct carl9170_sta_tid *agg[CARL9170_NUM_TID];
+	struct carl9170_sta_tid __rcu *agg[CARL9170_NUM_TID];
 	struct carl9170_ba_stats stats[CARL9170_NUM_TID];
 };

@@ -532,7 +532,6 @@ int carl9170_set_ampdu_settings(struct ar9170 *ar);
 int carl9170_set_slot_time(struct ar9170 *ar);
 int carl9170_set_mac_rates(struct ar9170 *ar);
 int carl9170_set_hwretry_limit(struct ar9170 *ar, const u32 max_retry);
-int carl9170_update_beacon(struct ar9170 *ar, const bool submit);
 int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
 	const u8 ktype, const u8 keyidx, const u8 *keydata, const int keylen);
 int carl9170_disable_key(struct ar9170 *ar, const u8 id);
@@ -553,6 +552,7 @@ void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb);
 void carl9170_tx_scheduler(struct ar9170 *ar);
 void carl9170_tx_get_skb(struct sk_buff *skb);
 int carl9170_tx_put_skb(struct sk_buff *skb);
+int carl9170_update_beacon(struct ar9170 *ar, const bool submit);

 /* LEDs */
 #ifdef CONFIG_CARL9170_LEDS
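
The __rcu annotations above let sparse verify that these pointers are only touched through the RCU accessors. A minimal hedged fragment of the expected reader/writer usage, with illustrative struct names rather than carl9170's own:

/* Sketch of __rcu pointer usage. */
struct item {
	int value;
};

struct holder {
	struct item __rcu *cur;
};

static void publish(struct holder *h, struct item *it)
{
	rcu_assign_pointer(h->cur, it);		/* writer side */
}

static int peek(struct holder *h)
{
	struct item *it;
	int v = -1;

	rcu_read_lock();
	it = rcu_dereference(h->cur);		/* reader side, under the lock */
	if (it)
		v = it->value;
	rcu_read_unlock();
	return v;
}
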
|
@ -87,7 +87,7 @@ do { \
|
|||
__ar->cmd_buf[2 * __nreg + 1] = cpu_to_le32(r); \
|
||||
__ar->cmd_buf[2 * __nreg + 2] = cpu_to_le32(v); \
|
||||
__nreg++; \
|
||||
if ((__nreg >= PAYLOAD_MAX/2)) { \
|
||||
if ((__nreg >= PAYLOAD_MAX / 2)) { \
|
||||
if (IS_ACCEPTING_CMD(__ar)) \
|
||||
__err = carl9170_exec_cmd(__ar, \
|
||||
CARL9170_CMD_WREG, 8 * __nreg, \
|
||||
|
@ -160,7 +160,7 @@ do { \
|
|||
} while (0)
|
||||
|
||||
#define carl9170_async_regwrite_finish() do { \
|
||||
__async_regwrite_out : \
|
||||
__async_regwrite_out: \
|
||||
if (__cmd != NULL && __err == 0) \
|
||||
carl9170_async_regwrite_flush(); \
|
||||
kfree(__cmd); \
|
||||
|
|
|
@ -695,7 +695,7 @@ static char *carl9170_debugfs_bug_read(struct ar9170 *ar, char *buf,
|
|||
}
|
||||
__DEBUGFS_DECLARE_RW_FILE(bug, 400, CARL9170_STOPPED);
|
||||
|
||||
static const char *erp_modes[] = {
|
||||
static const char *const erp_modes[] = {
|
||||
[CARL9170_ERP_INVALID] = "INVALID",
|
||||
[CARL9170_ERP_AUTO] = "Automatic",
|
||||
[CARL9170_ERP_MAC80211] = "Set by MAC80211",
|
||||
|
|
|
@ -75,6 +75,9 @@ enum carl9170fw_feature_list {
|
|||
/* Firmware supports PSM in the 5GHZ Band */
|
||||
CARL9170FW_FIXED_5GHZ_PSM,
|
||||
|
||||
/* HW (ANI, CCA, MIB) tally counters */
|
||||
CARL9170FW_HW_COUNTERS,
|
||||
|
||||
/* KEEP LAST */
|
||||
__CARL9170FW_FEATURE_NUM
|
||||
};
|
||||
|
|
|
@ -174,6 +174,7 @@
|
|||
#define AR9170_MAC_SNIFFER_ENABLE_PROMISC BIT(0)
|
||||
#define AR9170_MAC_SNIFFER_DEFAULTS 0x02000000
|
||||
#define AR9170_MAC_REG_ENCRYPTION (AR9170_MAC_REG_BASE + 0x678)
|
||||
#define AR9170_MAC_ENCRYPTION_MGMT_RX_SOFTWARE BIT(2)
|
||||
#define AR9170_MAC_ENCRYPTION_RX_SOFTWARE BIT(3)
|
||||
#define AR9170_MAC_ENCRYPTION_DEFAULTS 0x70
|
||||
|
||||
|
@ -222,6 +223,12 @@
|
|||
#define AR9170_MAC_REG_TX_BLOCKACKS (AR9170_MAC_REG_BASE + 0x6c0)
|
||||
#define AR9170_MAC_REG_NAV_COUNT (AR9170_MAC_REG_BASE + 0x6c4)
|
||||
#define AR9170_MAC_REG_BACKOFF_STATUS (AR9170_MAC_REG_BASE + 0x6c8)
|
||||
#define AR9170_MAC_BACKOFF_CCA BIT(24)
|
||||
#define AR9170_MAC_BACKOFF_TX_PEX BIT(25)
|
||||
#define AR9170_MAC_BACKOFF_RX_PE BIT(26)
|
||||
#define AR9170_MAC_BACKOFF_MD_READY BIT(27)
|
||||
#define AR9170_MAC_BACKOFF_TX_PE BIT(28)
|
||||
|
||||
#define AR9170_MAC_REG_TX_RETRY (AR9170_MAC_REG_BASE + 0x6cc)
|
||||
|
||||
#define AR9170_MAC_REG_TX_COMPLETE (AR9170_MAC_REG_BASE + 0x6d4)
|
||||
|
@ -388,10 +395,40 @@
|
|||
|
||||
#define AR9170_MAC_REG_BCN_CURR_ADDR (AR9170_MAC_REG_BASE + 0xd98)
|
||||
#define AR9170_MAC_REG_BCN_COUNT (AR9170_MAC_REG_BASE + 0xd9c)
|
||||
|
||||
|
||||
#define AR9170_MAC_REG_BCN_HT1 (AR9170_MAC_REG_BASE + 0xda0)
|
||||
#define AR9170_MAC_BCN_HT1_HT_EN BIT(0)
|
||||
#define AR9170_MAC_BCN_HT1_GF_PMB BIT(1)
|
||||
#define AR9170_MAC_BCN_HT1_SP_EXP BIT(2)
|
||||
#define AR9170_MAC_BCN_HT1_TX_BF BIT(3)
|
||||
#define AR9170_MAC_BCN_HT1_PWR_CTRL_S 4
|
||||
#define AR9170_MAC_BCN_HT1_PWR_CTRL 0x70
|
||||
#define AR9170_MAC_BCN_HT1_TX_ANT1 BIT(7)
|
||||
#define AR9170_MAC_BCN_HT1_TX_ANT0 BIT(8)
|
||||
#define AR9170_MAC_BCN_HT1_NUM_LFT_S 9
|
||||
#define AR9170_MAC_BCN_HT1_NUM_LFT 0x600
|
||||
#define AR9170_MAC_BCN_HT1_BWC_20M_EXT BIT(16)
|
||||
#define AR9170_MAC_BCN_HT1_BWC_40M_SHARED BIT(17)
|
||||
#define AR9170_MAC_BCN_HT1_BWC_40M_DUP (BIT(16) | BIT(17))
|
||||
#define AR9170_MAC_BCN_HT1_BF_MCS_S 18
|
||||
#define AR9170_MAC_BCN_HT1_BF_MCS 0x1c0000
|
||||
#define AR9170_MAC_BCN_HT1_TPC_S 21
|
||||
#define AR9170_MAC_BCN_HT1_TPC 0x7e00000
|
||||
#define AR9170_MAC_BCN_HT1_CHAIN_MASK_S 27
|
||||
#define AR9170_MAC_BCN_HT1_CHAIN_MASK 0x38000000
|
||||
|
||||
#define AR9170_MAC_REG_BCN_HT2 (AR9170_MAC_REG_BASE + 0xda4)
|
||||
#define AR9170_MAC_BCN_HT2_MCS_S 0
|
||||
#define AR9170_MAC_BCN_HT2_MCS 0x7f
|
||||
#define AR9170_MAC_BCN_HT2_BW40 BIT(8)
|
||||
#define AR9170_MAC_BCN_HT2_SMOOTHING BIT(9)
|
||||
#define AR9170_MAC_BCN_HT2_SS BIT(10)
|
||||
#define AR9170_MAC_BCN_HT2_NSS BIT(11)
|
||||
#define AR9170_MAC_BCN_HT2_STBC_S 12
|
||||
#define AR9170_MAC_BCN_HT2_STBC 0x3000
|
||||
#define AR9170_MAC_BCN_HT2_ADV_COD BIT(14)
|
||||
#define AR9170_MAC_BCN_HT2_SGI BIT(15)
|
||||
#define AR9170_MAC_BCN_HT2_LEN_S 16
|
||||
#define AR9170_MAC_BCN_HT2_LEN 0xffff0000
|
||||
|
||||
#define AR9170_MAC_REG_DMA_TXQX_ADDR_CURR (AR9170_MAC_REG_BASE + 0xdc0)
|
||||
|
||||
|
|
|
@ -118,7 +118,7 @@ static void carl9170_led_set_brightness(struct led_classdev *led,
|
|||
}
|
||||
|
||||
if (likely(IS_ACCEPTING_CMD(ar) && arl->toggled))
|
||||
ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ/10);
|
||||
ieee80211_queue_delayed_work(ar->hw, &ar->led_work, HZ / 10);
|
||||
}
|
||||
|
||||
static int carl9170_led_register_led(struct ar9170 *ar, int i, char *name,
|
||||
|
|
|
@ -455,135 +455,6 @@ int carl9170_set_beacon_timers(struct ar9170 *ar)
|
|||
return carl9170_regwrite_result();
|
||||
}
|
||||
|
||||
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
|
||||
{
|
||||
struct sk_buff *skb = NULL;
|
||||
struct carl9170_vif_info *cvif;
|
||||
struct ieee80211_tx_info *txinfo;
|
||||
__le32 *data, *old = NULL;
|
||||
u32 word, off, addr, len;
|
||||
int i = 0, err = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
cvif = rcu_dereference(ar->beacon_iter);
|
||||
retry:
|
||||
if (ar->vifs == 0 || !cvif)
|
||||
goto out_unlock;
|
||||
|
||||
list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
|
||||
if (cvif->active && cvif->enable_beacon)
|
||||
goto found;
|
||||
}
|
||||
|
||||
if (!ar->beacon_enabled || i++)
|
||||
goto out_unlock;
|
||||
|
||||
goto retry;
|
||||
|
||||
found:
|
||||
rcu_assign_pointer(ar->beacon_iter, cvif);
|
||||
|
||||
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
|
||||
NULL, NULL);
|
||||
|
||||
if (!skb) {
|
||||
err = -ENOMEM;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
txinfo = IEEE80211_SKB_CB(skb);
|
||||
if (txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS) {
|
||||
err = -EINVAL;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
spin_lock_bh(&ar->beacon_lock);
|
||||
data = (__le32 *)skb->data;
|
||||
if (cvif->beacon)
|
||||
old = (__le32 *)cvif->beacon->data;
|
||||
|
||||
off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
|
||||
addr = ar->fw.beacon_addr + off;
|
||||
len = roundup(skb->len + FCS_LEN, 4);
|
||||
|
||||
if ((off + len) > ar->fw.beacon_max_len) {
|
||||
if (net_ratelimit()) {
|
||||
wiphy_err(ar->hw->wiphy, "beacon does not "
|
||||
"fit into device memory!\n");
|
||||
}
|
||||
err = -EINVAL;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
if (len > AR9170_MAC_BCN_LENGTH_MAX) {
|
||||
if (net_ratelimit()) {
|
||||
wiphy_err(ar->hw->wiphy, "no support for beacons "
|
||||
"bigger than %d (yours:%d).\n",
|
||||
AR9170_MAC_BCN_LENGTH_MAX, len);
|
||||
}
|
||||
|
||||
err = -EMSGSIZE;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
i = txinfo->control.rates[0].idx;
|
||||
if (txinfo->band != IEEE80211_BAND_2GHZ)
|
||||
i += 4;
|
||||
|
||||
word = __carl9170_ratetable[i].hw_value & 0xf;
|
||||
if (i < 4)
|
||||
word |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
|
||||
else
|
||||
word |= ((skb->len + FCS_LEN) << 16) + 0x0010;
|
||||
|
||||
carl9170_async_regwrite_begin(ar);
|
||||
carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, word);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
|
||||
/*
|
||||
* XXX: This accesses beyond skb data for up
|
||||
* to the last 3 bytes!!
|
||||
*/
|
||||
|
||||
if (old && (data[i] == old[i]))
|
||||
continue;
|
||||
|
||||
word = le32_to_cpu(data[i]);
|
||||
carl9170_async_regwrite(addr + 4 * i, word);
|
||||
}
|
||||
carl9170_async_regwrite_finish();
|
||||
|
||||
dev_kfree_skb_any(cvif->beacon);
|
||||
cvif->beacon = NULL;
|
||||
|
||||
err = carl9170_async_regwrite_result();
|
||||
if (!err)
|
||||
cvif->beacon = skb;
|
||||
spin_unlock_bh(&ar->beacon_lock);
|
||||
if (err)
|
||||
goto err_free;
|
||||
|
||||
if (submit) {
|
||||
err = carl9170_bcn_ctrl(ar, cvif->id,
|
||||
CARL9170_BCN_CTRL_CAB_TRIGGER,
|
||||
addr, skb->len + FCS_LEN);
|
||||
|
||||
if (err)
|
||||
goto err_free;
|
||||
}
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
|
||||
err_unlock:
|
||||
spin_unlock_bh(&ar->beacon_lock);
|
||||
|
||||
err_free:
|
||||
rcu_read_unlock();
|
||||
dev_kfree_skb_any(skb);
|
||||
return err;
|
||||
}
|
||||
|
||||
int carl9170_upload_key(struct ar9170 *ar, const u8 id, const u8 *mac,
|
||||
const u8 ktype, const u8 keyidx, const u8 *keydata,
|
||||
const int keylen)
|
||||
@@ -1630,7 +1630,7 @@ static int carl9170_read_eeprom(struct ar9170 *ar)
BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif

for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
for (j = 0; j < RW; j++)
offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
RB * i + 4 * j);
@@ -1098,7 +1098,7 @@ static u8 carl9170_interpolate_u8(u8 x, u8 x1, u8 y1, u8 x2, u8 y2)
* Isn't it just DIV_ROUND_UP(y, 1<<SHIFT)?
* Can we rely on the compiler to optimise away the div?
*/
return (y >> SHIFT) + ((y & (1<<(SHIFT-1))) >> (SHIFT - 1));
return (y >> SHIFT) + ((y & (1 << (SHIFT - 1))) >> (SHIFT - 1));
#undef SHIFT
}
@@ -1379,7 +1379,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
modes[i].max_power =
carl9170_get_max_edge_power(ar,
freq+f_off, EDGES(ctl_idx, 1));
freq + f_off, EDGES(ctl_idx, 1));

/*
* TODO: check if the regulatory max. power is
@@ -1441,7 +1441,7 @@ static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
if (freq < 3000)
f = freq - 2300;
else
f = (freq - 4800)/5;
f = (freq - 4800) / 5;

/*
* cycle through the various modes
@@ -661,11 +661,67 @@ void carl9170_tx_process_status(struct ar9170 *ar,
}
|
||||
}
|
||||
|
||||
static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
|
||||
struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate,
|
||||
unsigned int *phyrate, unsigned int *tpc, unsigned int *chains)
|
||||
{
|
||||
struct ieee80211_rate *rate = NULL;
|
||||
u8 *txpower;
|
||||
unsigned int idx;
|
||||
|
||||
idx = txrate->idx;
|
||||
*tpc = 0;
|
||||
*phyrate = 0;
|
||||
|
||||
if (txrate->flags & IEEE80211_TX_RC_MCS) {
|
||||
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
|
||||
/* +1 dBm for HT40 */
|
||||
*tpc += 2;
|
||||
|
||||
if (info->band == IEEE80211_BAND_2GHZ)
|
||||
txpower = ar->power_2G_ht40;
|
||||
else
|
||||
txpower = ar->power_5G_ht40;
|
||||
} else {
|
||||
if (info->band == IEEE80211_BAND_2GHZ)
|
||||
txpower = ar->power_2G_ht20;
|
||||
else
|
||||
txpower = ar->power_5G_ht20;
|
||||
}
|
||||
|
||||
*phyrate = txrate->idx;
|
||||
*tpc += txpower[idx & 7];
|
||||
} else {
|
||||
if (info->band == IEEE80211_BAND_2GHZ) {
|
||||
if (idx < 4)
|
||||
txpower = ar->power_2G_cck;
|
||||
else
|
||||
txpower = ar->power_2G_ofdm;
|
||||
} else {
|
||||
txpower = ar->power_5G_leg;
|
||||
idx += 4;
|
||||
}
|
||||
|
||||
rate = &__carl9170_ratetable[idx];
|
||||
*tpc += txpower[(rate->hw_value & 0x30) >> 4];
|
||||
*phyrate = rate->hw_value & 0xf;
|
||||
}
|
||||
|
||||
if (ar->eeprom.tx_mask == 1) {
|
||||
*chains = AR9170_TX_PHY_TXCHAIN_1;
|
||||
} else {
|
||||
if (!(txrate->flags & IEEE80211_TX_RC_MCS) &&
|
||||
rate && rate->bitrate >= 360)
|
||||
*chains = AR9170_TX_PHY_TXCHAIN_1;
|
||||
else
|
||||
*chains = AR9170_TX_PHY_TXCHAIN_2;
|
||||
}
|
||||
}
|
||||
|
||||
static __le32 carl9170_tx_physet(struct ar9170 *ar,
|
||||
struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
|
||||
{
|
||||
struct ieee80211_rate *rate = NULL;
|
||||
u32 power, chains;
|
||||
unsigned int power = 0, chains = 0, phyrate = 0;
|
||||
__le32 tmp;
|
||||
|
||||
tmp = cpu_to_le32(0);
@@ -682,35 +738,12 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
|
||||
|
||||
if (txrate->flags & IEEE80211_TX_RC_MCS) {
|
||||
u32 r = txrate->idx;
|
||||
u8 *txpower;
|
||||
SET_VAL(AR9170_TX_PHY_MCS, phyrate, txrate->idx);
|
||||
|
||||
/* heavy clip control */
|
||||
tmp |= cpu_to_le32((r & 0x7) <<
|
||||
tmp |= cpu_to_le32((txrate->idx & 0x7) <<
|
||||
AR9170_TX_PHY_TX_HEAVY_CLIP_S);
|
||||
|
||||
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
|
||||
if (info->band == IEEE80211_BAND_5GHZ)
|
||||
txpower = ar->power_5G_ht40;
|
||||
else
|
||||
txpower = ar->power_2G_ht40;
|
||||
} else {
|
||||
if (info->band == IEEE80211_BAND_5GHZ)
|
||||
txpower = ar->power_5G_ht20;
|
||||
else
|
||||
txpower = ar->power_2G_ht20;
|
||||
}
|
||||
|
||||
power = txpower[r & 7];
|
||||
|
||||
/* +1 dBm for HT40 */
|
||||
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
|
||||
power += 2;
|
||||
|
||||
r <<= AR9170_TX_PHY_MCS_S;
|
||||
BUG_ON(r & ~AR9170_TX_PHY_MCS);
|
||||
|
||||
tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS);
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
|
||||
|
||||
/*
@@ -720,33 +753,14 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
* tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
|
||||
*/
|
||||
} else {
|
||||
u8 *txpower;
|
||||
u32 mod;
|
||||
u32 phyrate;
|
||||
u8 idx = txrate->idx;
|
||||
|
||||
if (info->band != IEEE80211_BAND_2GHZ) {
|
||||
idx += 4;
|
||||
txpower = ar->power_5G_leg;
|
||||
mod = AR9170_TX_PHY_MOD_OFDM;
|
||||
if (info->band == IEEE80211_BAND_2GHZ) {
|
||||
if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
|
||||
else
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
|
||||
} else {
|
||||
if (idx < 4) {
|
||||
txpower = ar->power_2G_cck;
|
||||
mod = AR9170_TX_PHY_MOD_CCK;
|
||||
} else {
|
||||
mod = AR9170_TX_PHY_MOD_OFDM;
|
||||
txpower = ar->power_2G_ofdm;
|
||||
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_OFDM);
|
||||
}
|
||||
}
|
||||
|
||||
rate = &__carl9170_ratetable[idx];
|
||||
|
||||
phyrate = rate->hw_value & 0xF;
|
||||
power = txpower[(rate->hw_value & 0x30) >> 4];
|
||||
phyrate <<= AR9170_TX_PHY_MCS_S;
|
||||
|
||||
tmp |= cpu_to_le32(mod);
|
||||
tmp |= cpu_to_le32(phyrate);
|
||||
|
||||
/*
|
||||
* short preamble seems to be broken too.
@@ -755,23 +769,12 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
* tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
|
||||
*/
|
||||
}
|
||||
power <<= AR9170_TX_PHY_TX_PWR_S;
|
||||
power &= AR9170_TX_PHY_TX_PWR;
|
||||
tmp |= cpu_to_le32(power);
|
||||
|
||||
/* set TX chains */
|
||||
if (ar->eeprom.tx_mask == 1) {
|
||||
chains = AR9170_TX_PHY_TXCHAIN_1;
|
||||
} else {
|
||||
chains = AR9170_TX_PHY_TXCHAIN_2;
|
||||
|
||||
/* >= 36M legacy OFDM - use only one chain */
|
||||
if (rate && rate->bitrate >= 360 &&
|
||||
!(txrate->flags & IEEE80211_TX_RC_MCS))
|
||||
chains = AR9170_TX_PHY_TXCHAIN_1;
|
||||
}
|
||||
tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S);
|
||||
carl9170_tx_rate_tpc_chains(ar, info, txrate,
|
||||
&phyrate, &power, &chains);
|
||||
|
||||
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_MCS, phyrate));
|
||||
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TX_PWR, power));
|
||||
tmp |= cpu_to_le32(SET_CONSTVAL(AR9170_TX_PHY_TXCHAIN, chains));
|
||||
return tmp;
|
||||
}
|
||||
@@ -1438,3 +1441,154 @@ void carl9170_tx_scheduler(struct ar9170 *ar)
if (ar->tx_schedule)
|
||||
carl9170_tx(ar);
|
||||
}
|
||||
|
||||
int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
|
||||
{
|
||||
struct sk_buff *skb = NULL;
|
||||
struct carl9170_vif_info *cvif;
|
||||
struct ieee80211_tx_info *txinfo;
|
||||
struct ieee80211_tx_rate *rate;
|
||||
__le32 *data, *old = NULL;
|
||||
unsigned int plcp, power, chains;
|
||||
u32 word, ht1, off, addr, len;
|
||||
int i = 0, err = 0;
|
||||
|
||||
rcu_read_lock();
|
||||
cvif = rcu_dereference(ar->beacon_iter);
|
||||
retry:
|
||||
if (ar->vifs == 0 || !cvif)
|
||||
goto out_unlock;
|
||||
|
||||
list_for_each_entry_continue_rcu(cvif, &ar->vif_list, list) {
|
||||
if (cvif->active && cvif->enable_beacon)
|
||||
goto found;
|
||||
}
|
||||
|
||||
if (!ar->beacon_enabled || i++)
|
||||
goto out_unlock;
|
||||
|
||||
goto retry;
|
||||
|
||||
found:
|
||||
rcu_assign_pointer(ar->beacon_iter, cvif);
|
||||
|
||||
skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
|
||||
NULL, NULL);
|
||||
|
||||
if (!skb) {
|
||||
err = -ENOMEM;
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
txinfo = IEEE80211_SKB_CB(skb);
|
||||
spin_lock_bh(&ar->beacon_lock);
|
||||
data = (__le32 *)skb->data;
|
||||
if (cvif->beacon)
|
||||
old = (__le32 *)cvif->beacon->data;
|
||||
|
||||
off = cvif->id * AR9170_MAC_BCN_LENGTH_MAX;
|
||||
addr = ar->fw.beacon_addr + off;
|
||||
len = roundup(skb->len + FCS_LEN, 4);
|
||||
|
||||
if ((off + len) > ar->fw.beacon_max_len) {
|
||||
if (net_ratelimit()) {
|
||||
wiphy_err(ar->hw->wiphy, "beacon does not "
|
||||
"fit into device memory!\n");
|
||||
}
|
||||
err = -EINVAL;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
if (len > AR9170_MAC_BCN_LENGTH_MAX) {
|
||||
if (net_ratelimit()) {
|
||||
wiphy_err(ar->hw->wiphy, "no support for beacons "
|
||||
"bigger than %d (yours:%d).\n",
|
||||
AR9170_MAC_BCN_LENGTH_MAX, len);
|
||||
}
|
||||
|
||||
err = -EMSGSIZE;
|
||||
goto err_unlock;
|
||||
}
|
||||
|
||||
ht1 = AR9170_MAC_BCN_HT1_TX_ANT0;
|
||||
rate = &txinfo->control.rates[0];
|
||||
carl9170_tx_rate_tpc_chains(ar, txinfo, rate, &plcp, &power, &chains);
|
||||
if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS)) {
|
||||
if (plcp <= AR9170_TX_PHY_RATE_CCK_11M)
|
||||
plcp |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
|
||||
else
|
||||
plcp |= ((skb->len + FCS_LEN) << 16) + 0x0010;
|
||||
} else {
|
||||
ht1 |= AR9170_MAC_BCN_HT1_HT_EN;
|
||||
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
|
||||
plcp |= AR9170_MAC_BCN_HT2_SGI;
|
||||
|
||||
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
|
||||
ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_SHARED;
|
||||
plcp |= AR9170_MAC_BCN_HT2_BW40;
|
||||
}
|
||||
if (rate->flags & IEEE80211_TX_RC_DUP_DATA) {
|
||||
ht1 |= AR9170_MAC_BCN_HT1_BWC_40M_DUP;
|
||||
plcp |= AR9170_MAC_BCN_HT2_BW40;
|
||||
}
|
||||
|
||||
SET_VAL(AR9170_MAC_BCN_HT2_LEN, plcp, skb->len + FCS_LEN);
|
||||
}
|
||||
|
||||
SET_VAL(AR9170_MAC_BCN_HT1_PWR_CTRL, ht1, 7);
|
||||
SET_VAL(AR9170_MAC_BCN_HT1_TPC, ht1, power);
|
||||
SET_VAL(AR9170_MAC_BCN_HT1_CHAIN_MASK, ht1, chains);
|
||||
if (chains == AR9170_TX_PHY_TXCHAIN_2)
|
||||
ht1 |= AR9170_MAC_BCN_HT1_TX_ANT1;
|
||||
|
||||
carl9170_async_regwrite_begin(ar);
|
||||
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT1, ht1);
|
||||
if (!(txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS))
|
||||
carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, plcp);
|
||||
else
|
||||
carl9170_async_regwrite(AR9170_MAC_REG_BCN_HT2, plcp);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
|
||||
/*
|
||||
* XXX: This accesses beyond skb data for up
|
||||
* to the last 3 bytes!!
|
||||
*/
|
||||
|
||||
if (old && (data[i] == old[i]))
|
||||
continue;
|
||||
|
||||
word = le32_to_cpu(data[i]);
|
||||
carl9170_async_regwrite(addr + 4 * i, word);
|
||||
}
|
||||
carl9170_async_regwrite_finish();
|
||||
|
||||
dev_kfree_skb_any(cvif->beacon);
|
||||
cvif->beacon = NULL;
|
||||
|
||||
err = carl9170_async_regwrite_result();
|
||||
if (!err)
|
||||
cvif->beacon = skb;
|
||||
spin_unlock_bh(&ar->beacon_lock);
|
||||
if (err)
|
||||
goto err_free;
|
||||
|
||||
if (submit) {
|
||||
err = carl9170_bcn_ctrl(ar, cvif->id,
|
||||
CARL9170_BCN_CTRL_CAB_TRIGGER,
|
||||
addr, skb->len + FCS_LEN);
|
||||
|
||||
if (err)
|
||||
goto err_free;
|
||||
}
|
||||
out_unlock:
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
|
||||
err_unlock:
|
||||
spin_unlock_bh(&ar->beacon_lock);
|
||||
|
||||
err_free:
|
||||
rcu_read_unlock();
|
||||
dev_kfree_skb_any(skb);
|
||||
return err;
|
||||
}
|
||||
@@ -105,11 +105,8 @@ static bool ath_hw_keysetmac(struct ath_common *common,
if (mac[0] & 0x01)
|
||||
unicast_flag = 0;
|
||||
|
||||
macHi = (mac[5] << 8) | mac[4];
|
||||
macLo = (mac[3] << 24) |
|
||||
(mac[2] << 16) |
|
||||
(mac[1] << 8) |
|
||||
mac[0];
|
||||
macLo = get_unaligned_le32(mac);
|
||||
macHi = get_unaligned_le16(mac + 4);
|
||||
macLo >>= 1;
|
||||
macLo |= (macHi & 1) << 31;
|
||||
macHi >>= 1;
|
||||
@@ -433,6 +433,12 @@ enum {
#define B43_BCMA_IOCTL_PHY_BW_40MHZ 0x00000080 /* 40 MHz bandwidth, 160 MHz PHY */
|
||||
#define B43_BCMA_IOCTL_GMODE 0x00002000 /* G Mode Enable */
|
||||
|
||||
/* BCMA 802.11 core specific IO status (BCMA_IOST) flags */
|
||||
#define B43_BCMA_IOST_2G_PHY 0x00000001 /* 2.4G capable phy */
|
||||
#define B43_BCMA_IOST_5G_PHY 0x00000002 /* 5G capable phy */
|
||||
#define B43_BCMA_IOST_FASTCLKA 0x00000004 /* Fast Clock Available */
|
||||
#define B43_BCMA_IOST_DUALB_PHY 0x00000008 /* Dualband phy */
|
||||
|
||||
/* 802.11 core specific TM State Low (SSB_TMSLOW) flags */
|
||||
#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */
|
||||
#define B43_TMSLOW_PHY_BANDWIDTH 0x00C00000 /* PHY band width and clock speed mask (N-PHY only) */
@@ -588,6 +594,7 @@ struct b43_dma {
struct b43_dmaring *rx_ring;
|
||||
|
||||
u32 translation; /* Routing bits */
|
||||
bool parity; /* Check for parity */
|
||||
};
|
||||
|
||||
struct b43_pio_txqueue;
|
||||
@@ -126,55 +126,52 @@ struct b43_bus_dev *b43_bus_dev_bcma_init(struct bcma_device *core)
|
||||
/* SSB */
|
||||
#ifdef CONFIG_B43_SSB
|
||||
static inline int b43_bus_ssb_bus_may_powerdown(struct b43_bus_dev *dev)
|
||||
static int b43_bus_ssb_bus_may_powerdown(struct b43_bus_dev *dev)
|
||||
{
|
||||
return ssb_bus_may_powerdown(dev->sdev->bus);
|
||||
}
|
||||
static inline int b43_bus_ssb_bus_powerup(struct b43_bus_dev *dev,
|
||||
static int b43_bus_ssb_bus_powerup(struct b43_bus_dev *dev,
|
||||
bool dynamic_pctl)
|
||||
{
|
||||
return ssb_bus_powerup(dev->sdev->bus, dynamic_pctl);
|
||||
}
|
||||
static inline int b43_bus_ssb_device_is_enabled(struct b43_bus_dev *dev)
|
||||
static int b43_bus_ssb_device_is_enabled(struct b43_bus_dev *dev)
|
||||
{
|
||||
return ssb_device_is_enabled(dev->sdev);
|
||||
}
|
||||
static inline void b43_bus_ssb_device_enable(struct b43_bus_dev *dev,
|
||||
static void b43_bus_ssb_device_enable(struct b43_bus_dev *dev,
|
||||
u32 core_specific_flags)
|
||||
{
|
||||
ssb_device_enable(dev->sdev, core_specific_flags);
|
||||
}
|
||||
static inline void b43_bus_ssb_device_disable(struct b43_bus_dev *dev,
|
||||
static void b43_bus_ssb_device_disable(struct b43_bus_dev *dev,
|
||||
u32 core_specific_flags)
|
||||
{
|
||||
ssb_device_disable(dev->sdev, core_specific_flags);
|
||||
}
|
||||
|
||||
static inline u16 b43_bus_ssb_read16(struct b43_bus_dev *dev, u16 offset)
|
||||
static u16 b43_bus_ssb_read16(struct b43_bus_dev *dev, u16 offset)
|
||||
{
|
||||
return ssb_read16(dev->sdev, offset);
|
||||
}
|
||||
static inline u32 b43_bus_ssb_read32(struct b43_bus_dev *dev, u16 offset)
|
||||
static u32 b43_bus_ssb_read32(struct b43_bus_dev *dev, u16 offset)
|
||||
{
|
||||
return ssb_read32(dev->sdev, offset);
|
||||
}
|
||||
static inline
|
||||
void b43_bus_ssb_write16(struct b43_bus_dev *dev, u16 offset, u16 value)
|
||||
static void b43_bus_ssb_write16(struct b43_bus_dev *dev, u16 offset, u16 value)
|
||||
{
|
||||
ssb_write16(dev->sdev, offset, value);
|
||||
}
|
||||
static inline
|
||||
void b43_bus_ssb_write32(struct b43_bus_dev *dev, u16 offset, u32 value)
|
||||
static void b43_bus_ssb_write32(struct b43_bus_dev *dev, u16 offset, u32 value)
|
||||
{
|
||||
ssb_write32(dev->sdev, offset, value);
|
||||
}
|
||||
static inline
|
||||
void b43_bus_ssb_block_read(struct b43_bus_dev *dev, void *buffer,
|
||||
static void b43_bus_ssb_block_read(struct b43_bus_dev *dev, void *buffer,
|
||||
size_t count, u16 offset, u8 reg_width)
|
||||
{
|
||||
ssb_block_read(dev->sdev, buffer, count, offset, reg_width);
|
||||
}
|
||||
static inline
|
||||
static
|
||||
void b43_bus_ssb_block_write(struct b43_bus_dev *dev, const void *buffer,
|
||||
size_t count, u16 offset, u8 reg_width)
|
||||
{
|
||||
@@ -174,7 +174,7 @@ static void op64_fill_descriptor(struct b43_dmaring *ring,
addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
|
||||
addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
|
||||
>> SSB_DMA_TRANSLATION_SHIFT;
|
||||
addrhi |= (ring->dev->dma.translation << 1);
|
||||
addrhi |= ring->dev->dma.translation;
|
||||
if (slot == ring->nr_slots - 1)
|
||||
ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
|
||||
if (start)
@@ -659,6 +659,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
u32 value;
|
||||
u32 addrext;
|
||||
u32 trans = ring->dev->dma.translation;
|
||||
bool parity = ring->dev->dma.parity;
|
||||
|
||||
if (ring->tx) {
|
||||
if (ring->type == B43_DMA_64BIT) {
@@ -669,13 +670,15 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
value = B43_DMA64_TXENABLE;
|
||||
value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
|
||||
& B43_DMA64_TXADDREXT_MASK;
|
||||
if (!parity)
|
||||
value |= B43_DMA64_TXPARITYDISABLE;
|
||||
b43_dma_write(ring, B43_DMA64_TXCTL, value);
|
||||
b43_dma_write(ring, B43_DMA64_TXRINGLO,
|
||||
(ringbase & 0xFFFFFFFF));
|
||||
b43_dma_write(ring, B43_DMA64_TXRINGHI,
|
||||
((ringbase >> 32) &
|
||||
~SSB_DMA_TRANSLATION_MASK)
|
||||
| (trans << 1));
|
||||
| trans);
|
||||
} else {
|
||||
u32 ringbase = (u32) (ring->dmabase);
|
||||
@@ -684,6 +687,8 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
value = B43_DMA32_TXENABLE;
|
||||
value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
|
||||
& B43_DMA32_TXADDREXT_MASK;
|
||||
if (!parity)
|
||||
value |= B43_DMA32_TXPARITYDISABLE;
|
||||
b43_dma_write(ring, B43_DMA32_TXCTL, value);
|
||||
b43_dma_write(ring, B43_DMA32_TXRING,
|
||||
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
@@ -702,13 +707,15 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
value |= B43_DMA64_RXENABLE;
|
||||
value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
|
||||
& B43_DMA64_RXADDREXT_MASK;
|
||||
if (!parity)
|
||||
value |= B43_DMA64_RXPARITYDISABLE;
|
||||
b43_dma_write(ring, B43_DMA64_RXCTL, value);
|
||||
b43_dma_write(ring, B43_DMA64_RXRINGLO,
|
||||
(ringbase & 0xFFFFFFFF));
|
||||
b43_dma_write(ring, B43_DMA64_RXRINGHI,
|
||||
((ringbase >> 32) &
|
||||
~SSB_DMA_TRANSLATION_MASK)
|
||||
| (trans << 1));
|
||||
| trans);
|
||||
b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
|
||||
sizeof(struct b43_dmadesc64));
|
||||
} else {
@@ -720,6 +727,8 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
value |= B43_DMA32_RXENABLE;
|
||||
value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
|
||||
& B43_DMA32_RXADDREXT_MASK;
|
||||
if (!parity)
|
||||
value |= B43_DMA32_RXPARITYDISABLE;
|
||||
b43_dma_write(ring, B43_DMA32_RXCTL, value);
|
||||
b43_dma_write(ring, B43_DMA32_RXRING,
|
||||
(ringbase & ~SSB_DMA_TRANSLATION_MASK)
@@ -1057,6 +1066,11 @@ int b43_dma_init(struct b43_wldev *dev)
return err;
|
||||
|
||||
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
dma->translation = bcma_core_dma_translation(dev->dev->bdev);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_B43_SSB
|
||||
case B43_BUS_SSB:
|
||||
dma->translation = ssb_dma_translation(dev->dev->sdev);
@@ -1064,6 +1078,13 @@ int b43_dma_init(struct b43_wldev *dev)
#endif
|
||||
}
|
||||
|
||||
dma->parity = true;
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
/* TODO: find out which SSB devices need disabling parity */
|
||||
if (dev->dev->bus_type == B43_BUS_BCMA)
|
||||
dma->parity = false;
|
||||
#endif
|
||||
|
||||
err = -ENOMEM;
|
||||
/* setup TX DMA channels. */
|
||||
dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
|
||||
@@ -20,6 +20,7 @@
#define B43_DMA32_TXSUSPEND 0x00000002
|
||||
#define B43_DMA32_TXLOOPBACK 0x00000004
|
||||
#define B43_DMA32_TXFLUSH 0x00000010
|
||||
#define B43_DMA32_TXPARITYDISABLE 0x00000800
|
||||
#define B43_DMA32_TXADDREXT_MASK 0x00030000
|
||||
#define B43_DMA32_TXADDREXT_SHIFT 16
|
||||
#define B43_DMA32_TXRING 0x04
@@ -44,6 +45,7 @@
#define B43_DMA32_RXFROFF_MASK 0x000000FE
|
||||
#define B43_DMA32_RXFROFF_SHIFT 1
|
||||
#define B43_DMA32_RXDIRECTFIFO 0x00000100
|
||||
#define B43_DMA32_RXPARITYDISABLE 0x00000800
|
||||
#define B43_DMA32_RXADDREXT_MASK 0x00030000
|
||||
#define B43_DMA32_RXADDREXT_SHIFT 16
|
||||
#define B43_DMA32_RXRING 0x14
@@ -84,6 +86,7 @@ struct b43_dmadesc32 {
#define B43_DMA64_TXSUSPEND 0x00000002
|
||||
#define B43_DMA64_TXLOOPBACK 0x00000004
|
||||
#define B43_DMA64_TXFLUSH 0x00000010
|
||||
#define B43_DMA64_TXPARITYDISABLE 0x00000800
|
||||
#define B43_DMA64_TXADDREXT_MASK 0x00030000
|
||||
#define B43_DMA64_TXADDREXT_SHIFT 16
|
||||
#define B43_DMA64_TXINDEX 0x04
@@ -111,6 +114,7 @@ struct b43_dmadesc32 {
#define B43_DMA64_RXFROFF_MASK 0x000000FE
|
||||
#define B43_DMA64_RXFROFF_SHIFT 1
|
||||
#define B43_DMA64_RXDIRECTFIFO 0x00000100
|
||||
#define B43_DMA64_RXPARITYDISABLE 0x00000800
|
||||
#define B43_DMA64_RXADDREXT_MASK 0x00030000
|
||||
#define B43_DMA64_RXADDREXT_SHIFT 16
|
||||
#define B43_DMA64_RXINDEX 0x24
|
||||
@@ -1156,17 +1156,37 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
}
|
||||
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
static void b43_bcma_phy_reset(struct b43_wldev *dev)
|
||||
{
|
||||
u32 flags;
|
||||
|
||||
/* Put PHY into reset */
|
||||
flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
flags |= B43_BCMA_IOCTL_PHY_RESET;
|
||||
flags |= B43_BCMA_IOCTL_PHY_BW_20MHZ; /* Make 20 MHz def */
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
|
||||
udelay(2);
|
||||
|
||||
/* Take PHY out of reset */
|
||||
flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
flags &= ~B43_BCMA_IOCTL_PHY_RESET;
|
||||
flags |= BCMA_IOCTL_FGC;
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
|
||||
udelay(1);
|
||||
|
||||
/* Do not force clock anymore */
|
||||
flags = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
flags &= ~BCMA_IOCTL_FGC;
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, flags);
|
||||
udelay(1);
|
||||
}
|
||||
|
||||
static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
|
||||
{
|
||||
u32 flags = 0;
|
||||
|
||||
if (gmode)
|
||||
flags = B43_BCMA_IOCTL_GMODE;
|
||||
flags |= B43_BCMA_IOCTL_PHY_CLKEN;
|
||||
flags |= B43_BCMA_IOCTL_PHY_BW_20MHZ; /* Make 20 MHz def */
|
||||
b43_device_enable(dev, flags);
|
||||
|
||||
/* TODO: reset PHY */
|
||||
b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
|
||||
bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
|
||||
b43_bcma_phy_reset(dev);
|
||||
bcma_core_pll_ctl(dev->dev->bdev, 0x300, 0x3000000, true);
|
||||
}
|
||||
#endif
|
||||
@@ -2814,12 +2834,12 @@ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on)
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
tmp = bcma_read32(dev->dev->bdev, BCMA_IOCTL);
|
||||
tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
if (on)
|
||||
tmp |= B43_BCMA_IOCTL_MACPHYCLKEN;
|
||||
else
|
||||
tmp &= ~B43_BCMA_IOCTL_MACPHYCLKEN;
|
||||
bcma_write32(dev->dev->bdev, BCMA_IOCTL, tmp);
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_B43_SSB
@@ -4948,6 +4968,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
struct b43_wl *wl = dev->wl;
|
||||
struct pci_dev *pdev = NULL;
|
||||
int err;
|
||||
u32 tmp;
|
||||
bool have_2ghz_phy = 0, have_5ghz_phy = 0;
|
||||
|
||||
/* Do NOT do any device initialization here.
@@ -4973,17 +4994,17 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
/* FIXME */
|
||||
have_2ghz_phy = 1;
|
||||
have_5ghz_phy = 0;
|
||||
tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
|
||||
have_2ghz_phy = !!(tmp & B43_BCMA_IOST_2G_PHY);
|
||||
have_5ghz_phy = !!(tmp & B43_BCMA_IOST_5G_PHY);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_B43_SSB
|
||||
case B43_BUS_SSB:
|
||||
if (dev->dev->core_rev >= 5) {
|
||||
u32 tmshigh = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
|
||||
have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY);
|
||||
have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY);
|
||||
tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
|
||||
have_2ghz_phy = !!(tmp & B43_TMSHIGH_HAVE_2GHZ_PHY);
|
||||
have_5ghz_phy = !!(tmp & B43_TMSHIGH_HAVE_5GHZ_PHY);
|
||||
} else
|
||||
B43_WARN_ON(1);
|
||||
break;
@@ -5164,6 +5185,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
struct ssb_sprom *sprom = dev->bus_sprom;
|
||||
struct ieee80211_hw *hw;
|
||||
struct b43_wl *wl;
|
||||
char chip_name[6];
|
||||
|
||||
hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops);
|
||||
if (!hw) {
@@ -5202,8 +5224,10 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
INIT_WORK(&wl->tx_work, b43_tx_work);
|
||||
skb_queue_head_init(&wl->tx_queue);
|
||||
|
||||
b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n",
|
||||
dev->chip_id, dev->core_rev);
|
||||
snprintf(chip_name, ARRAY_SIZE(chip_name),
|
||||
(dev->chip_id > 0x9999) ? "%d" : "%04X", dev->chip_id);
|
||||
b43info(wl, "Broadcom %s WLAN found (core revision %u)\n", chip_name,
|
||||
dev->core_rev);
|
||||
return wl;
|
||||
}
|
||||
@@ -5211,19 +5235,59 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
static int b43_bcma_probe(struct bcma_device *core)
|
||||
{
|
||||
struct b43_bus_dev *dev;
|
||||
struct b43_wl *wl;
|
||||
int err;
|
||||
|
||||
dev = b43_bus_dev_bcma_init(core);
|
||||
if (!dev)
|
||||
return -ENODEV;
|
||||
|
||||
b43err(NULL, "BCMA is not supported yet!");
|
||||
kfree(dev);
|
||||
return -EOPNOTSUPP;
|
||||
wl = b43_wireless_init(dev);
|
||||
if (IS_ERR(wl)) {
|
||||
err = PTR_ERR(wl);
|
||||
goto bcma_out;
|
||||
}
|
||||
|
||||
err = b43_one_core_attach(dev, wl);
|
||||
if (err)
|
||||
goto bcma_err_wireless_exit;
|
||||
|
||||
err = ieee80211_register_hw(wl->hw);
|
||||
if (err)
|
||||
goto bcma_err_one_core_detach;
|
||||
b43_leds_register(wl->current_dev);
|
||||
|
||||
bcma_out:
|
||||
return err;
|
||||
|
||||
bcma_err_one_core_detach:
|
||||
b43_one_core_detach(dev);
|
||||
bcma_err_wireless_exit:
|
||||
ieee80211_free_hw(wl->hw);
|
||||
return err;
|
||||
}
|
||||
|
||||
static void b43_bcma_remove(struct bcma_device *core)
|
||||
{
|
||||
/* TODO */
|
||||
struct b43_wldev *wldev = bcma_get_drvdata(core);
|
||||
struct b43_wl *wl = wldev->wl;
|
||||
|
||||
/* We must cancel any work here before unregistering from ieee80211,
|
||||
* as the ieee80211 unreg will destroy the workqueue. */
|
||||
cancel_work_sync(&wldev->restart_work);
|
||||
|
||||
/* Restore the queues count before unregistering, because firmware detect
|
||||
* might have modified it. Restoring is important, so the networking
|
||||
* stack can properly free resources. */
|
||||
wl->hw->queues = wl->mac80211_initially_registered_queues;
|
||||
b43_leds_stop(wldev);
|
||||
ieee80211_unregister_hw(wl->hw);
|
||||
|
||||
b43_one_core_detach(wldev->dev);
|
||||
|
||||
b43_leds_unregister(wl);
|
||||
|
||||
ieee80211_free_hw(wl->hw);
|
||||
}
|
||||
|
||||
static struct bcma_driver b43_bcma_driver = {
|
||||
@@ -148,7 +148,7 @@ static void b43_radio_2059_init(struct b43_wldev *dev)
b43_radio_mask(dev, 0x17F, ~0x1);
|
||||
}
|
||||
|
||||
b43_radio_mask(dev, 0x11, 0x0008);
|
||||
b43_radio_mask(dev, 0x11, ~0x0008);
|
||||
}
|
||||
|
||||
/**************************************************
@@ -276,18 +276,25 @@ static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
|
||||
b43err(dev->wl, "MAC not suspended\n");
|
||||
|
||||
/* In the following PHY ops we copy wl's dummy behaviour.
|
||||
* TODO: Find out if reads (currently hidden in masks/masksets) are
|
||||
* needed and replace following ops with just writes or w&r.
|
||||
* Note: B43_PHY_HT_RF_CTL1 register is tricky, wrong operation can
|
||||
* cause delayed (!) machine lock up. */
|
||||
if (blocked) {
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, ~0);
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
|
||||
} else {
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, ~0);
|
||||
b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, ~0, 0x1);
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, ~0);
|
||||
b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, ~0, 0x2);
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
|
||||
b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x1);
|
||||
b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
|
||||
b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x2);
|
||||
|
||||
if (dev->phy.radio_ver == 0x2059)
|
||||
b43_radio_2059_init(dev);
|
||||
else
|
||||
B43_WARN_ON(1);
|
||||
|
||||
b43_switch_channel(dev, dev->phy.channel);
|
||||
}
|
||||
}
|
||||
@@ -329,7 +336,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev)
|
||||
{
|
||||
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
|
||||
return 1;
|
||||
return 11;
|
||||
return 36;
|
||||
}
|
||||
|
||||
@@ -611,12 +611,12 @@ static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
switch (dev->dev->bus_type) {
|
||||
#ifdef CONFIG_B43_BCMA
|
||||
case B43_BUS_BCMA:
|
||||
tmp = bcma_read32(dev->dev->bdev, BCMA_IOCTL);
|
||||
tmp = bcma_aread32(dev->dev->bdev, BCMA_IOCTL);
|
||||
if (force)
|
||||
tmp |= BCMA_IOCTL_FGC;
|
||||
else
|
||||
tmp &= ~BCMA_IOCTL_FGC;
|
||||
bcma_write32(dev->dev->bdev, BCMA_IOCTL, tmp);
|
||||
bcma_awrite32(dev->dev->bdev, BCMA_IOCTL, tmp);
|
||||
break;
|
||||
#endif
|
||||
#ifdef CONFIG_B43_SSB
|
||||
@@ -161,5 +161,14 @@ static const struct b43_phy_ht_channeltab_e_radio2059 b43_phy_ht_channeltab_radi
const struct b43_phy_ht_channeltab_e_radio2059
|
||||
*b43_phy_ht_get_channeltab_e_r2059(struct b43_wldev *dev, u16 freq)
|
||||
{
|
||||
const struct b43_phy_ht_channeltab_e_radio2059 *e;
|
||||
unsigned int i;
|
||||
|
||||
e = b43_phy_ht_channeltab_radio2059;
|
||||
for (i = 0; i < ARRAY_SIZE(b43_phy_ht_channeltab_radio2059); i++, e++) {
|
||||
if (e->freq == freq)
|
||||
return e;
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
@@ -532,6 +532,8 @@ struct b43legacy_dma {
|
||||
struct b43legacy_dmaring *rx_ring0;
|
||||
struct b43legacy_dmaring *rx_ring3; /* only on core.rev < 5 */
|
||||
|
||||
u32 translation; /* Routing bits */
|
||||
};
|
||||
|
||||
/* Data structures for PIO transmission, per 80211 core. */
|
||||
@@ -73,7 +73,7 @@ static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
|
||||
addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
|
||||
>> SSB_DMA_TRANSLATION_SHIFT;
|
||||
addr |= ssb_dma_translation(ring->dev->dev);
|
||||
addr |= ring->dev->dma.translation;
|
||||
ctl = (bufsize - ring->frameoffset)
|
||||
& B43legacy_DMA32_DCTL_BYTECNT;
|
||||
if (slot == ring->nr_slots - 1)
@@ -175,7 +175,7 @@ static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
|
||||
addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
|
||||
>> SSB_DMA_TRANSLATION_SHIFT;
|
||||
addrhi |= ssb_dma_translation(ring->dev->dev);
|
||||
addrhi |= ring->dev->dma.translation;
|
||||
if (slot == ring->nr_slots - 1)
|
||||
ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
|
||||
if (start)
@@ -709,7 +709,7 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
int err = 0;
|
||||
u32 value;
|
||||
u32 addrext;
|
||||
u32 trans = ssb_dma_translation(ring->dev->dev);
|
||||
u32 trans = ring->dev->dma.translation;
|
||||
|
||||
if (ring->tx) {
|
||||
if (ring->type == B43legacy_DMA_64BIT) {
@@ -1093,6 +1093,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
return -EOPNOTSUPP;
|
||||
#endif
|
||||
}
|
||||
dma->translation = ssb_dma_translation(dev->dev);
|
||||
|
||||
err = -ENOMEM;
|
||||
/* setup TX DMA channels. */
|
||||
@@ -5,16 +5,16 @@ iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
iwlagn-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
|
||||
iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
|
||||
|
||||
iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
|
||||
iwlagn-objs += iwl-rx.o iwl-tx.o iwl-sta.o
|
||||
iwlagn-objs += iwl-core.o iwl-eeprom.o iwl-power.o
|
||||
iwlagn-objs += iwl-rx.o iwl-sta.o
|
||||
iwlagn-objs += iwl-scan.o iwl-led.o
|
||||
iwlagn-objs += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
|
||||
iwlagn-objs += iwl-agn-rxon.o
|
||||
iwlagn-objs += iwl-5000.o
|
||||
iwlagn-objs += iwl-6000.o
|
||||
iwlagn-objs += iwl-1000.o
|
||||
iwlagn-objs += iwl-2000.o
|
||||
iwlagn-objs += iwl-pci.o
|
||||
iwlagn-objs += iwl-trans.o
|
||||
iwlagn-objs += iwl-trans.o iwl-trans-rx-pcie.o iwl-trans-tx-pcie.o
|
||||
|
||||
iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
|
||||
iwlagn-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
|
||||
@@ -168,9 +168,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
static struct iwl_lib_ops iwl1000_lib = {
|
||||
.set_hw_params = iwl1000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.nic_config = iwl1000_nic_config,
|
||||
.eeprom_ops = {
|
||||
.regulatory_bands = {
@@ -186,10 +183,6 @@ static struct iwl_lib_ops iwl1000_lib = {
.temperature = iwlagn_temperature,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl1000_ops = {
|
||||
.lib = &iwl1000_lib,
|
||||
};
|
||||
|
||||
static struct iwl_base_params iwl1000_base_params = {
|
||||
.num_of_queues = IWLAGN_NUM_QUEUES,
|
||||
.num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -217,7 +210,7 @@ static struct iwl_ht_params iwl1000_ht_params = {
.ucode_api_min = IWL1000_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
|
||||
.ops = &iwl1000_ops, \
|
||||
.lib = &iwl1000_lib, \
|
||||
.base_params = &iwl1000_base_params, \
|
||||
.led_mode = IWL_LED_BLINK
|
||||
@@ -238,7 +231,7 @@ struct iwl_cfg iwl1000_bg_cfg = {
.ucode_api_min = IWL100_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
|
||||
.ops = &iwl1000_ops, \
|
||||
.lib = &iwl1000_lib, \
|
||||
.base_params = &iwl1000_base_params, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.rx_with_siso_diversity = true
|
||||
@@ -85,9 +85,6 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
if (priv->cfg->iq_invert)
|
||||
iwl_set_bit(priv, CSR_GP_DRIVER_REG,
|
||||
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
|
||||
|
||||
if (priv->cfg->disable_otp_refresh)
|
||||
iwl_write_prph(priv, APMG_ANALOG_SVR_REG, 0x80000010);
|
||||
}
|
||||
|
||||
static struct iwl_sensitivity_ranges iwl2000_sensitivity = {
|
@@ -156,7 +153,7 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ) |
|
||||
BIT(IWL_CALIB_BASE_BAND);
|
||||
if (priv->cfg->need_dc_calib)
|
||||
priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
|
||||
priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
|
||||
if (priv->cfg->need_temp_offset_calib)
|
||||
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
|
||||
@@ -167,9 +164,6 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
|
||||
static struct iwl_lib_ops iwl2000_lib = {
|
||||
.set_hw_params = iwl2000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.nic_config = iwl2000_nic_config,
|
||||
.eeprom_ops = {
|
||||
.regulatory_bands = {
@@ -188,10 +182,9 @@ static struct iwl_lib_ops iwl2000_lib = {
|
||||
static struct iwl_lib_ops iwl2030_lib = {
|
||||
.set_hw_params = iwl2000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.nic_config = iwl2000_nic_config,
|
||||
.eeprom_ops = {
|
||||
.regulatory_bands = {
@@ -208,22 +201,6 @@ static struct iwl_lib_ops iwl2030_lib = {
.temperature = iwlagn_temperature,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl2000_ops = {
|
||||
.lib = &iwl2000_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl2030_ops = {
|
||||
.lib = &iwl2030_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl105_ops = {
|
||||
.lib = &iwl2000_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl135_ops = {
|
||||
.lib = &iwl2030_lib,
|
||||
};
|
||||
|
||||
static struct iwl_base_params iwl2000_base_params = {
|
||||
.eeprom_size = OTP_LOW_IMAGE_SIZE,
|
||||
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -282,13 +259,12 @@ static struct iwl_bt_params iwl2030_bt_params = {
.ucode_api_min = IWL2000_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
|
||||
.ops = &iwl2000_ops, \
|
||||
.lib = &iwl2000_lib, \
|
||||
.base_params = &iwl2000_base_params, \
|
||||
.need_dc_calib = true, \
|
||||
.need_temp_offset_calib = true, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.iq_invert = true, \
|
||||
.disable_otp_refresh = true \
|
||||
.iq_invert = true \
|
||||
|
||||
struct iwl_cfg iwl2000_2bgn_cfg = {
|
||||
.name = "2000 Series 2x2 BGN",
@@ -307,7 +283,7 @@ struct iwl_cfg iwl2000_2bg_cfg = {
.ucode_api_min = IWL2030_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
|
||||
.ops = &iwl2030_ops, \
|
||||
.lib = &iwl2030_lib, \
|
||||
.base_params = &iwl2030_base_params, \
|
||||
.bt_params = &iwl2030_bt_params, \
|
||||
.need_dc_calib = true, \
@@ -333,13 +309,14 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.ucode_api_min = IWL105_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
|
||||
.ops = &iwl105_ops, \
|
||||
.lib = &iwl2000_lib, \
|
||||
.base_params = &iwl2000_base_params, \
|
||||
.need_dc_calib = true, \
|
||||
.need_temp_offset_calib = true, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.adv_pm = true, \
|
||||
.rx_with_siso_diversity = true \
|
||||
.rx_with_siso_diversity = true, \
|
||||
.iq_invert = true \
|
||||
|
||||
struct iwl_cfg iwl105_bg_cfg = {
|
||||
.name = "105 Series 1x1 BG",
@@ -358,14 +335,15 @@ struct iwl_cfg iwl105_bgn_cfg = {
.ucode_api_min = IWL135_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
|
||||
.ops = &iwl135_ops, \
|
||||
.lib = &iwl2030_lib, \
|
||||
.base_params = &iwl2030_base_params, \
|
||||
.bt_params = &iwl2030_bt_params, \
|
||||
.need_dc_calib = true, \
|
||||
.need_temp_offset_calib = true, \
|
||||
.led_mode = IWL_LED_RF_STATE, \
|
||||
.adv_pm = true, \
|
||||
.rx_with_siso_diversity = true \
|
||||
.rx_with_siso_diversity = true, \
|
||||
.iq_invert = true \
|
||||
|
||||
struct iwl_cfg iwl135_bg_cfg = {
|
||||
.name = "135 Series 1x1 BG/BT",
|
||||
@@ -315,14 +315,11 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
|
||||
}
|
||||
|
||||
return trans_send_cmd(priv, &hcmd);
|
||||
return trans_send_cmd(&priv->trans, &hcmd);
|
||||
}
|
||||
|
||||
static struct iwl_lib_ops iwl5000_lib = {
|
||||
.set_hw_params = iwl5000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.set_channel_switch = iwl5000_hw_channel_switch,
|
||||
.nic_config = iwl5000_nic_config,
|
||||
.eeprom_ops = {
@@ -341,9 +338,6 @@ static struct iwl_lib_ops iwl5000_lib = {
|
||||
static struct iwl_lib_ops iwl5150_lib = {
|
||||
.set_hw_params = iwl5150_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.set_channel_switch = iwl5000_hw_channel_switch,
|
||||
.nic_config = iwl5000_nic_config,
|
||||
.eeprom_ops = {
@@ -360,14 +354,6 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl5000_ops = {
|
||||
.lib = &iwl5000_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl5150_ops = {
|
||||
.lib = &iwl5150_lib,
|
||||
};
|
||||
|
||||
static struct iwl_base_params iwl5000_base_params = {
|
||||
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
|
||||
.num_of_queues = IWLAGN_NUM_QUEUES,
|
||||
|
@ -390,7 +376,7 @@ static struct iwl_ht_params iwl5000_ht_params = {
|
|||
.ucode_api_min = IWL5000_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
|
||||
.ops = &iwl5000_ops, \
|
||||
.lib = &iwl5000_lib, \
|
||||
.base_params = &iwl5000_base_params, \
|
||||
.led_mode = IWL_LED_BLINK
|
@@ -433,7 +419,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
|||
.ucode_api_min = IWL5000_UCODE_API_MIN,
|
||||
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
|
||||
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
|
||||
.ops = &iwl5000_ops,
|
||||
.lib = &iwl5000_lib,
|
||||
.base_params = &iwl5000_base_params,
|
||||
.ht_params = &iwl5000_ht_params,
|
||||
.led_mode = IWL_LED_BLINK,
@@ -446,7 +432,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.ucode_api_min = IWL5150_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
|
||||
.ops = &iwl5150_ops, \
|
||||
.lib = &iwl5150_lib, \
|
||||
.base_params = &iwl5000_base_params, \
|
||||
.need_dc_calib = true, \
|
||||
.led_mode = IWL_LED_BLINK, \
|
||||
@@ -106,10 +106,8 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
|
||||
}
|
||||
/* do additional nic configuration if needed */
|
||||
if (priv->cfg->ops->nic &&
|
||||
priv->cfg->ops->nic->additional_nic_config) {
|
||||
priv->cfg->ops->nic->additional_nic_config(priv);
|
||||
}
|
||||
if (priv->cfg->additional_nic_config)
|
||||
priv->cfg->additional_nic_config(priv);
|
||||
}
|
||||
|
||||
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
|
@@ -178,7 +176,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
BIT(IWL_CALIB_TX_IQ) |
|
||||
BIT(IWL_CALIB_BASE_BAND);
|
||||
if (priv->cfg->need_dc_calib)
|
||||
priv->hw_params.calib_rt_cfg |= BIT(IWL_CALIB_CFG_DC_IDX);
|
||||
priv->hw_params.calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
|
||||
if (priv->cfg->need_temp_offset_calib)
|
||||
priv->hw_params.calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
|
||||
@@ -255,14 +253,11 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
return -EFAULT;
|
||||
}
|
||||
|
||||
return trans_send_cmd(priv, &hcmd);
|
||||
return trans_send_cmd(&priv->trans, &hcmd);
|
||||
}
|
||||
|
||||
static struct iwl_lib_ops iwl6000_lib = {
|
||||
.set_hw_params = iwl6000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_setup_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.set_channel_switch = iwl6000_hw_channel_switch,
|
||||
.nic_config = iwl6000_nic_config,
|
||||
.eeprom_ops = {
@@ -282,10 +277,9 @@ static struct iwl_lib_ops iwl6000_lib = {
|
||||
static struct iwl_lib_ops iwl6030_lib = {
|
||||
.set_hw_params = iwl6000_hw_set_hw_params,
|
||||
.rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.bt_rx_handler_setup = iwlagn_bt_rx_handler_setup,
|
||||
.bt_setup_deferred_work = iwlagn_bt_setup_deferred_work,
|
||||
.cancel_deferred_work = iwlagn_bt_cancel_deferred_work,
|
||||
.is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
|
||||
.set_channel_switch = iwl6000_hw_channel_switch,
|
||||
.nic_config = iwl6000_nic_config,
|
||||
.eeprom_ops = {
@@ -303,32 +297,6 @@ static struct iwl_lib_ops iwl6030_lib = {
.temperature = iwlagn_temperature,
|
||||
};
|
||||
|
||||
static struct iwl_nic_ops iwl6050_nic_ops = {
|
||||
.additional_nic_config = &iwl6050_additional_nic_config,
|
||||
};
|
||||
|
||||
static struct iwl_nic_ops iwl6150_nic_ops = {
|
||||
.additional_nic_config = &iwl6150_additional_nic_config,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl6000_ops = {
|
||||
.lib = &iwl6000_lib,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl6050_ops = {
|
||||
.lib = &iwl6000_lib,
|
||||
.nic = &iwl6050_nic_ops,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl6150_ops = {
|
||||
.lib = &iwl6000_lib,
|
||||
.nic = &iwl6150_nic_ops,
|
||||
};
|
||||
|
||||
static const struct iwl_ops iwl6030_ops = {
|
||||
.lib = &iwl6030_lib,
|
||||
};
|
||||
|
||||
static struct iwl_base_params iwl6000_base_params = {
|
||||
.eeprom_size = OTP_LOW_IMAGE_SIZE,
|
||||
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -402,7 +370,7 @@ static struct iwl_bt_params iwl6000_bt_params = {
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
|
||||
.ops = &iwl6000_ops, \
|
||||
.lib = &iwl6000_lib, \
|
||||
.base_params = &iwl6000_g2_base_params, \
|
||||
.need_dc_calib = true, \
|
||||
.need_temp_offset_calib = true, \
|
||||
|
@ -430,7 +398,7 @@ struct iwl_cfg iwl6005_2bg_cfg = {
|
|||
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
|
||||
.eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
|
||||
.ops = &iwl6030_ops, \
|
||||
.lib = &iwl6030_lib, \
|
||||
.base_params = &iwl6000_g2_base_params, \
|
||||
.bt_params = &iwl6000_bt_params, \
|
||||
.need_dc_calib = true, \
@@ -511,7 +479,7 @@ struct iwl_cfg iwl130_bg_cfg = {
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
|
||||
.eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
|
||||
.ops = &iwl6000_ops, \
|
||||
.lib = &iwl6000_lib, \
|
||||
.base_params = &iwl6000_base_params, \
|
||||
.pa_type = IWL_PA_INTERNAL, \
|
||||
.led_mode = IWL_LED_BLINK
@@ -538,7 +506,7 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.ucode_api_min = IWL6050_UCODE_API_MIN, \
|
||||
.valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
|
||||
.valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
|
||||
.ops = &iwl6050_ops, \
|
||||
.lib = &iwl6000_lib, \
|
||||
.additional_nic_config = iwl6050_additional_nic_config, \
|
||||
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
|
||||
.base_params = &iwl6050_base_params, \
@@ -561,7 +530,7 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.fw_name_pre = IWL6050_FW_PRE, \
|
||||
.ucode_api_max = IWL6050_UCODE_API_MAX, \
|
||||
.ucode_api_min = IWL6050_UCODE_API_MIN, \
|
||||
.ops = &iwl6150_ops, \
|
||||
.lib = &iwl6000_lib, \
|
||||
.additional_nic_config = iwl6150_additional_nic_config, \
|
||||
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
|
||||
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
|
||||
.base_params = &iwl6050_base_params, \
@@ -587,7 +557,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.ucode_api_min = IWL6000_UCODE_API_MIN,
|
||||
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
|
||||
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
|
||||
.ops = &iwl6000_ops,
|
||||
.lib = &iwl6000_lib,
|
||||
.base_params = &iwl6000_base_params,
|
||||
.ht_params = &iwl6000_ht_params,
|
||||
.need_dc_calib = true,
|
||||
@@ -98,7 +98,7 @@ int iwl_send_calib_results(struct iwl_priv *priv)
hcmd.len[0] = priv->calib_results[i].buf_len;
|
||||
hcmd.data[0] = priv->calib_results[i].buf;
|
||||
hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
|
||||
ret = trans_send_cmd(priv, &hcmd);
|
||||
ret = trans_send_cmd(&priv->trans, &hcmd);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Error %d iteration %d\n",
|
||||
ret, i);
@@ -484,7 +484,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
|
||||
sizeof(u16)*HD_TABLE_SIZE);
|
||||
|
||||
return trans_send_cmd(priv, &cmd_out);
|
||||
return trans_send_cmd(&priv->trans, &cmd_out);
|
||||
}
|
||||
|
||||
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
@@ -548,7 +548,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
&(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
|
||||
sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
|
||||
|
||||
return trans_send_cmd(priv, &cmd_out);
|
||||
return trans_send_cmd(&priv->trans, &cmd_out);
|
||||
}
|
||||
|
||||
void iwl_init_sensitivity(struct iwl_priv *priv)
@@ -840,6 +840,65 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
active_chains);
|
||||
}
|
||||
|
||||
static void iwlagn_gain_computation(struct iwl_priv *priv,
|
||||
u32 average_noise[NUM_RX_CHAINS],
|
||||
u16 min_average_noise_antenna_i,
|
||||
u32 min_average_noise,
|
||||
u8 default_chain)
|
||||
{
|
||||
int i;
|
||||
s32 delta_g;
|
||||
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
|
||||
|
||||
/*
|
||||
* Find Gain Code for the chains based on "default chain"
|
||||
*/
|
||||
for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
|
||||
if ((data->disconn_array[i])) {
|
||||
data->delta_gain_code[i] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
delta_g = (priv->cfg->base_params->chain_noise_scale *
|
||||
((s32)average_noise[default_chain] -
|
||||
(s32)average_noise[i])) / 1500;
|
||||
|
||||
/* bound gain by 2 bits value max, 3rd bit is sign */
|
||||
data->delta_gain_code[i] =
|
||||
min(abs(delta_g),
|
||||
(long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
|
||||
|
||||
if (delta_g < 0)
|
||||
/*
|
||||
* set negative sign ...
|
||||
* note to Intel developers: This is uCode API format,
|
||||
* not the format of any internal device registers.
|
||||
* Do not change this format for e.g. 6050 or similar
|
||||
* devices. Change format only if more resolution
|
||||
* (i.e. more than 2 bits magnitude) is needed.
|
||||
*/
|
||||
data->delta_gain_code[i] |= (1 << 2);
|
||||
}
|
||||
|
||||
IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
|
||||
data->delta_gain_code[1], data->delta_gain_code[2]);
|
||||
|
||||
if (!data->radio_write) {
|
||||
struct iwl_calib_chain_noise_gain_cmd cmd;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
iwl_set_calib_hdr(&cmd.hdr,
|
||||
priv->phy_calib_chain_noise_gain_cmd);
|
||||
cmd.delta_gain_1 = data->delta_gain_code[1];
|
||||
cmd.delta_gain_2 = data->delta_gain_code[2];
|
||||
trans_send_cmd_pdu(&priv->trans, REPLY_PHY_CALIBRATION_CMD,
|
||||
CMD_ASYNC, sizeof(cmd), &cmd);
|
||||
|
||||
data->radio_write = 1;
|
||||
data->state = IWL_CHAIN_NOISE_CALIBRATED;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Accumulate 16 beacons of signal and noise statistics for each of
|
||||
@@ -1,210 +0,0 @@
/******************************************************************************
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called LICENSE.GPL.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*
|
||||
*****************************************************************************/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
#include "iwl-dev.h"
|
||||
#include "iwl-core.h"
|
||||
#include "iwl-io.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-trans.h"
|
||||
|
||||
int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
|
||||
{
|
||||
struct iwl_tx_ant_config_cmd tx_ant_cmd = {
|
||||
.valid = cpu_to_le32(valid_tx_ant),
|
||||
};
|
||||
|
||||
if (IWL_UCODE_API(priv->ucode_ver) > 1) {
|
||||
IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
|
||||
return trans_send_cmd_pdu(priv,
|
||||
TX_ANT_CONFIGURATION_CMD,
|
||||
CMD_SYNC,
|
||||
sizeof(struct iwl_tx_ant_config_cmd),
|
||||
&tx_ant_cmd);
|
||||
} else {
|
||||
IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
void iwlagn_gain_computation(struct iwl_priv *priv,
|
||||
u32 average_noise[NUM_RX_CHAINS],
|
||||
u16 min_average_noise_antenna_i,
|
||||
u32 min_average_noise,
|
||||
u8 default_chain)
|
||||
{
|
||||
int i;
|
||||
s32 delta_g;
|
||||
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
|
||||
|
||||
/*
|
||||
* Find Gain Code for the chains based on "default chain"
|
||||
*/
|
||||
for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
|
||||
if ((data->disconn_array[i])) {
|
||||
data->delta_gain_code[i] = 0;
|
||||
continue;
|
||||
}
|
||||
|
||||
delta_g = (priv->cfg->base_params->chain_noise_scale *
|
||||
((s32)average_noise[default_chain] -
|
||||
(s32)average_noise[i])) / 1500;
|
||||
|
||||
/* bound gain by 2 bits value max, 3rd bit is sign */
|
||||
data->delta_gain_code[i] =
|
||||
min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
|
||||
|
||||
if (delta_g < 0)
|
||||
/*
|
||||
* set negative sign ...
|
||||
* note to Intel developers: This is uCode API format,
|
||||
* not the format of any internal device registers.
|
||||
* Do not change this format for e.g. 6050 or similar
|
||||
* devices. Change format only if more resolution
|
||||
* (i.e. more than 2 bits magnitude) is needed.
|
||||
*/
|
||||
data->delta_gain_code[i] |= (1 << 2);
|
||||
}
|
||||
|
||||
IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
|
||||
data->delta_gain_code[1], data->delta_gain_code[2]);
|
||||
|
||||
if (!data->radio_write) {
|
||||
struct iwl_calib_chain_noise_gain_cmd cmd;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
iwl_set_calib_hdr(&cmd.hdr,
|
||||
priv->_agn.phy_calib_chain_noise_gain_cmd);
|
||||
cmd.delta_gain_1 = data->delta_gain_code[1];
|
||||
cmd.delta_gain_2 = data->delta_gain_code[2];
|
||||
trans_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
|
||||
CMD_ASYNC, sizeof(cmd), &cmd);
|
||||
|
||||
data->radio_write = 1;
|
||||
data->state = IWL_CHAIN_NOISE_CALIBRATED;
|
||||
}
|
||||
}
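/*
 * Illustrative sketch (not part of the driver): how the delta-gain logic
 * above packs a signed correction into the 2-bit-magnitude + sign-bit
 * format sent in the calibration command.  CHAIN_NOISE_SCALE and
 * MAX_DELTA_GAIN_CODE below are assumed stand-ins for the driver's
 * per-device values, not the real definitions.
 */
#include <stdio.h>
#include <stdlib.h>

#define CHAIN_NOISE_SCALE	1000
#define MAX_DELTA_GAIN_CODE	3	/* 2-bit magnitude */

static unsigned char encode_delta_gain(int avg_default, int avg_other)
{
	int delta_g = (CHAIN_NOISE_SCALE * (avg_default - avg_other)) / 1500;
	unsigned char code = abs(delta_g) < MAX_DELTA_GAIN_CODE ?
			     abs(delta_g) : MAX_DELTA_GAIN_CODE;

	if (delta_g < 0)
		code |= 1 << 2;	/* sign bit, uCode API format */
	return code;
}

int main(void)
{
	/* noisier chain gets a negative delta -> magnitude clamped, sign bit set */
	printf("0x%x\n", (unsigned)encode_delta_gain(4500, 7500));	/* -> 0x7 */
	printf("0x%x\n", (unsigned)encode_delta_gain(6000, 5998));	/* -> 0x1 */
	return 0;
}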
|
||||
|
||||
int iwlagn_set_pan_params(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_wipan_params_cmd cmd;
|
||||
struct iwl_rxon_context *ctx_bss, *ctx_pan;
|
||||
int slot0 = 300, slot1 = 0;
|
||||
int ret;
|
||||
|
||||
if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
|
||||
return 0;
|
||||
|
||||
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
|
||||
|
||||
/*
|
||||
* If the PAN context is inactive, then we don't need
|
||||
* to update the PAN parameters, the last thing we'll
|
||||
* have done before it goes inactive is making the PAN
|
||||
* parameters be WLAN-only.
|
||||
*/
|
||||
if (!ctx_pan->is_active)
|
||||
return 0;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
/* only 2 slots are currently allowed */
|
||||
cmd.num_slots = 2;
|
||||
|
||||
cmd.slots[0].type = 0; /* BSS */
|
||||
cmd.slots[1].type = 1; /* PAN */
|
||||
|
||||
if (priv->_agn.hw_roc_channel) {
|
||||
/* both contexts must be used for this to happen */
|
||||
slot1 = priv->_agn.hw_roc_duration;
|
||||
slot0 = IWL_MIN_SLOT_TIME;
|
||||
} else if (ctx_bss->vif && ctx_pan->vif) {
|
||||
int bcnint = ctx_pan->vif->bss_conf.beacon_int;
|
||||
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
|
||||
|
||||
/* should be set, but seems unused?? */
|
||||
cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
|
||||
|
||||
if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
|
||||
bcnint &&
|
||||
bcnint != ctx_bss->vif->bss_conf.beacon_int) {
|
||||
IWL_ERR(priv,
|
||||
"beacon intervals don't match (%d, %d)\n",
|
||||
ctx_bss->vif->bss_conf.beacon_int,
|
||||
ctx_pan->vif->bss_conf.beacon_int);
|
||||
} else
|
||||
bcnint = max_t(int, bcnint,
|
||||
ctx_bss->vif->bss_conf.beacon_int);
|
||||
if (!bcnint)
|
||||
bcnint = DEFAULT_BEACON_INTERVAL;
|
||||
slot0 = bcnint / 2;
|
||||
slot1 = bcnint - slot0;
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
|
||||
(!ctx_bss->vif->bss_conf.idle &&
|
||||
!ctx_bss->vif->bss_conf.assoc)) {
|
||||
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
} else if (!ctx_pan->vif->bss_conf.idle &&
|
||||
!ctx_pan->vif->bss_conf.assoc) {
|
||||
slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot0 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
} else if (ctx_pan->vif) {
|
||||
slot0 = 0;
|
||||
slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
|
||||
ctx_pan->vif->bss_conf.beacon_int;
|
||||
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
|
||||
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
}
|
||||
|
||||
cmd.slots[0].width = cpu_to_le16(slot0);
|
||||
cmd.slots[1].width = cpu_to_le16(slot1);
|
||||
|
||||
ret = trans_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, CMD_SYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
|
@ -1,306 +0,0 @@
|
|||
/******************************************************************************
|
||||
*
|
||||
* GPL LICENSE SUMMARY
|
||||
*
|
||||
* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but
|
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
|
||||
* USA
|
||||
*
|
||||
* The full GNU General Public License is included in this distribution
|
||||
* in the file called LICENSE.GPL.
|
||||
*
|
||||
* Contact Information:
|
||||
* Intel Linux Wireless <ilw@linux.intel.com>
|
||||
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
||||
*****************************************************************************/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <net/mac80211.h>
|
||||
|
||||
#include "iwl-dev.h"
|
||||
#include "iwl-core.h"
|
||||
#include "iwl-agn.h"
|
||||
#include "iwl-helpers.h"
|
||||
|
||||
#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
|
||||
|
||||
/* Free dram table */
|
||||
void iwl_free_isr_ict(struct iwl_priv *priv)
|
||||
{
|
||||
if (priv->_agn.ict_tbl_vir) {
|
||||
dma_free_coherent(priv->bus.dev,
|
||||
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
|
||||
priv->_agn.ict_tbl_vir,
|
||||
priv->_agn.ict_tbl_dma);
|
||||
priv->_agn.ict_tbl_vir = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/* Allocate the DRAM shared table (it is PAGE_SIZE aligned) and
|
||||
* also reset all data related to ICT table interrupt.
|
||||
*/
|
||||
int iwl_alloc_isr_ict(struct iwl_priv *priv)
|
||||
{
|
||||
|
||||
/* allocate shared data table */
|
||||
priv->_agn.ict_tbl_vir =
|
||||
dma_alloc_coherent(priv->bus.dev,
|
||||
(sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
|
||||
&priv->_agn.ict_tbl_dma, GFP_KERNEL);
|
||||
if (!priv->_agn.ict_tbl_vir)
|
||||
return -ENOMEM;
|
||||
|
||||
/* align table to PAGE_SIZE boundary */
|
||||
priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
|
||||
|
||||
IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
|
||||
(unsigned long long)priv->_agn.ict_tbl_dma,
|
||||
(unsigned long long)priv->_agn.aligned_ict_tbl_dma,
|
||||
(int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
|
||||
|
||||
priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir +
|
||||
(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma);
|
||||
|
||||
IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
|
||||
priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir,
|
||||
(int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
|
||||
|
||||
/* reset table and index to all 0 */
|
||||
memset(priv->_agn.ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
|
||||
priv->_agn.ict_index = 0;
|
||||
|
||||
/* add periodic RX interrupt */
|
||||
priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
|
||||
return 0;
|
||||
}
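/*
 * Illustrative sketch (not part of the driver): the PAGE_SIZE alignment
 * trick used above.  The table is over-allocated by one page so that a
 * page-aligned region can be carved out of it, and the same offset is
 * then applied to the CPU-visible pointer.  PAGE_SZ and ALIGN_UP are
 * plain userspace stand-ins for the kernel macros.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ		4096ULL
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	uint64_t dma = 0x12345678;		/* pretend bus address */
	uint64_t aligned = ALIGN_UP(dma, PAGE_SZ);
	uint64_t offset = aligned - dma;	/* applied to the virtual address too */

	printf("dma %#llx aligned %#llx offset %llu\n",
	       (unsigned long long)dma, (unsigned long long)aligned,
	       (unsigned long long)offset);
	return 0;
}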
|
||||
|
||||
/* Device is going up, inform it that we are using the ICT interrupt table;
|
||||
* we also need to tell the driver to start using the ICT interrupt.
|
||||
*/
|
||||
int iwl_reset_ict(struct iwl_priv *priv)
|
||||
{
|
||||
u32 val;
|
||||
unsigned long flags;
|
||||
|
||||
if (!priv->_agn.ict_tbl_vir)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwl_disable_interrupts(priv);
|
||||
|
||||
memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
|
||||
|
||||
val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT;
|
||||
|
||||
val |= CSR_DRAM_INT_TBL_ENABLE;
|
||||
val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
|
||||
|
||||
IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
|
||||
"aligned dma address %Lx\n",
|
||||
val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma);
|
||||
|
||||
iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
|
||||
priv->_agn.use_ict = true;
|
||||
priv->_agn.ict_index = 0;
|
||||
iwl_write32(priv, CSR_INT, priv->inta_mask);
|
||||
iwl_enable_interrupts(priv);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
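/*
 * Illustrative sketch (not part of the driver): how the DRAM interrupt
 * table register value is composed above - the page-aligned DMA address
 * shifted down by the page shift, with enable and wrap-check flag bits
 * OR'd in.  The flag values and PG_SHIFT here are made up for the
 * example; only the shape of the computation matches the code.
 */
#include <stdio.h>
#include <stdint.h>

#define PG_SHIFT		12
#define DRAM_INT_TBL_ENABLE	(1U << 31)	/* assumed flag position */
#define DRAM_INT_TBL_WRAP_CHECK	(1U << 27)	/* assumed flag position */

int main(void)
{
	uint64_t aligned_dma = 0x12346000;	/* page aligned */
	uint32_t val = (uint32_t)(aligned_dma >> PG_SHIFT);

	val |= DRAM_INT_TBL_ENABLE | DRAM_INT_TBL_WRAP_CHECK;
	printf("table register value %#x\n", (unsigned int)val);
	return 0;
}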
|
||||
|
||||
/* Device is going down disable ict interrupt usage */
|
||||
void iwl_disable_ict(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
priv->_agn.use_ict = false;
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
static irqreturn_t iwl_isr(int irq, void *data)
|
||||
{
|
||||
struct iwl_priv *priv = data;
|
||||
u32 inta, inta_mask;
|
||||
unsigned long flags;
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
u32 inta_fh;
|
||||
#endif
|
||||
if (!priv)
|
||||
return IRQ_NONE;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Disable (but don't clear!) interrupts here to avoid
|
||||
* back-to-back ISRs and sporadic interrupts from our NIC.
|
||||
* If we have something to service, the tasklet will re-enable ints.
|
||||
* If we *don't* have something, we'll re-enable before leaving here. */
|
||||
inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(priv, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
/* Discover which interrupts are active/pending */
|
||||
inta = iwl_read32(priv, CSR_INT);
|
||||
|
||||
/* Ignore interrupt if there's nothing in NIC to service.
|
||||
* This may be due to IRQ shared with another device,
|
||||
* or due to sporadic interrupts thrown from our NIC. */
|
||||
if (!inta) {
|
||||
IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
|
||||
goto none;
|
||||
}
|
||||
|
||||
if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
|
||||
/* Hardware disappeared. It might have already raised
|
||||
* an interrupt */
|
||||
IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
|
||||
goto unplugged;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEBUG
|
||||
if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
|
||||
inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
|
||||
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
|
||||
"fh 0x%08x\n", inta, inta_mask, inta_fh);
|
||||
}
|
||||
#endif
|
||||
|
||||
priv->_agn.inta |= inta;
|
||||
/* iwl_irq_tasklet() will service interrupts and re-enable them */
|
||||
if (likely(inta))
|
||||
tasklet_schedule(&priv->irq_tasklet);
|
||||
else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
unplugged:
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
|
||||
none:
|
||||
/* re-enable interrupts here since we don't have anything to service. */
|
||||
/* Only re-enable if interrupts were disabled by this handler and no tasklet was scheduled. */
|
||||
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
/* Interrupt handler using the ICT table. With this handler the driver
|
||||
* stops reading the INTA register (an expensive read) to discover the
|
||||
* device's interrupts; instead the device writes them into the ICT DRAM
|
||||
* table, increments the index and fires an interrupt to the driver. The
|
||||
* driver ORs all ICT table entries from the current index up to the first
|
||||
* entry with a 0 value; the result is the interrupt to service. The
|
||||
* driver then sets those entries back to 0 and updates the index.
|
||||
*/
|
||||
irqreturn_t iwl_isr_ict(int irq, void *data)
|
||||
{
|
||||
struct iwl_priv *priv = data;
|
||||
u32 inta, inta_mask;
|
||||
u32 val = 0;
|
||||
unsigned long flags;
|
||||
|
||||
if (!priv)
|
||||
return IRQ_NONE;
|
||||
|
||||
/* dram interrupt table not set yet,
|
||||
* use legacy interrupt.
|
||||
*/
|
||||
if (!priv->_agn.use_ict)
|
||||
return iwl_isr(irq, data);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
/* Disable (but don't clear!) interrupts here to avoid
|
||||
* back-to-back ISRs and sporadic interrupts from our NIC.
|
||||
* If we have something to service, the tasklet will re-enable ints.
|
||||
* If we *don't* have something, we'll re-enable before leaving here.
|
||||
*/
|
||||
inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
|
||||
iwl_write32(priv, CSR_INT_MASK, 0x00000000);
|
||||
|
||||
|
||||
/* Ignore interrupt if there's nothing in NIC to service.
|
||||
* This may be due to IRQ shared with another device,
|
||||
* or due to sporadic interrupts thrown from our NIC. */
|
||||
if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) {
|
||||
IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
|
||||
goto none;
|
||||
}
|
||||
|
||||
/* read all entries that are not 0, starting at ict_index */
|
||||
while (priv->_agn.ict_tbl[priv->_agn.ict_index]) {
|
||||
|
||||
val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]);
|
||||
IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
|
||||
priv->_agn.ict_index,
|
||||
le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]));
|
||||
priv->_agn.ict_tbl[priv->_agn.ict_index] = 0;
|
||||
priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index,
|
||||
ICT_COUNT);
|
||||
|
||||
}
|
||||
|
||||
/* We should not get this value, just ignore it. */
|
||||
if (val == 0xffffffff)
|
||||
val = 0;
|
||||
|
||||
/*
|
||||
* this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
|
||||
* (bit 15 before shifting it to 31) to clear when using interrupt
|
||||
* coalescing. fortunately, bits 18 and 19 stay set when this happens
|
||||
* so we use them to decide on the real state of the Rx bit.
|
||||
* In other words, bit 15 is set if bit 18 or bit 19 is set.
|
||||
*/
|
||||
if (val & 0xC0000)
|
||||
val |= 0x8000;
|
||||
|
||||
inta = (0xff & val) | ((0xff00 & val) << 16);
|
||||
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
|
||||
inta, inta_mask, val);
|
||||
|
||||
inta &= priv->inta_mask;
|
||||
priv->_agn.inta |= inta;
|
||||
|
||||
/* iwl_irq_tasklet() will service interrupts and re-enable them */
|
||||
if (likely(inta))
|
||||
tasklet_schedule(&priv->irq_tasklet);
|
||||
else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) {
|
||||
/* Re-enable interrupts only if they were disabled by this handler and
|
||||
* no tasklet was scheduled; if a tasklet is pending we must not enable
|
||||
* interrupts here, the tasklet will re-enable them.
|
||||
*/
|
||||
iwl_enable_interrupts(priv);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
|
||||
none:
|
||||
/* re-enable interrupts here since we don't have anything to service.
|
||||
* Only re-enable them if they were disabled by this handler.
|
||||
*/
|
||||
if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_NONE;
|
||||
}
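/*
 * Illustrative sketch (not part of the driver): how the ICT table value
 * accumulated above is folded back into a CSR_INT-style bitmask,
 * including the hardware-bug workaround for the Rx bit.  The bit
 * positions follow the code above; nothing here is a real register
 * definition.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ict_val_to_inta(uint32_t val)
{
	/*
	 * w/a: with interrupt coalescing the Rx bit (bit 15, which becomes
	 * bit 31 after the shift below) may be cleared even though bits
	 * 18/19 indicate Rx activity, so re-derive it from those bits.
	 */
	if (val & 0xC0000)
		val |= 0x8000;

	/* low byte stays in bits 0-7, second byte moves to bits 24-31 */
	return (0xff & val) | ((0xff00 & val) << 16);
}

int main(void)
{
	/* bit 18 set -> Rx bit reconstructed -> bit 31 in the result */
	printf("%#x\n", (unsigned int)ict_val_to_inta(0x00040001)); /* 0x80000001 */
	return 0;
}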
|
|
@ -53,73 +53,73 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
|
|||
|
||||
switch (status) {
|
||||
case TX_STATUS_POSTPONE_DELAY:
|
||||
priv->_agn.reply_tx_stats.pp_delay++;
|
||||
priv->reply_tx_stats.pp_delay++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_FEW_BYTES:
|
||||
priv->_agn.reply_tx_stats.pp_few_bytes++;
|
||||
priv->reply_tx_stats.pp_few_bytes++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_BT_PRIO:
|
||||
priv->_agn.reply_tx_stats.pp_bt_prio++;
|
||||
priv->reply_tx_stats.pp_bt_prio++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_QUIET_PERIOD:
|
||||
priv->_agn.reply_tx_stats.pp_quiet_period++;
|
||||
priv->reply_tx_stats.pp_quiet_period++;
|
||||
break;
|
||||
case TX_STATUS_POSTPONE_CALC_TTAK:
|
||||
priv->_agn.reply_tx_stats.pp_calc_ttak++;
|
||||
priv->reply_tx_stats.pp_calc_ttak++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
|
||||
priv->_agn.reply_tx_stats.int_crossed_retry++;
|
||||
priv->reply_tx_stats.int_crossed_retry++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_SHORT_LIMIT:
|
||||
priv->_agn.reply_tx_stats.short_limit++;
|
||||
priv->reply_tx_stats.short_limit++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_LONG_LIMIT:
|
||||
priv->_agn.reply_tx_stats.long_limit++;
|
||||
priv->reply_tx_stats.long_limit++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FIFO_UNDERRUN:
|
||||
priv->_agn.reply_tx_stats.fifo_underrun++;
|
||||
priv->reply_tx_stats.fifo_underrun++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_DRAIN_FLOW:
|
||||
priv->_agn.reply_tx_stats.drain_flow++;
|
||||
priv->reply_tx_stats.drain_flow++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_RFKILL_FLUSH:
|
||||
priv->_agn.reply_tx_stats.rfkill_flush++;
|
||||
priv->reply_tx_stats.rfkill_flush++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_LIFE_EXPIRE:
|
||||
priv->_agn.reply_tx_stats.life_expire++;
|
||||
priv->reply_tx_stats.life_expire++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_DEST_PS:
|
||||
priv->_agn.reply_tx_stats.dest_ps++;
|
||||
priv->reply_tx_stats.dest_ps++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_HOST_ABORTED:
|
||||
priv->_agn.reply_tx_stats.host_abort++;
|
||||
priv->reply_tx_stats.host_abort++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_BT_RETRY:
|
||||
priv->_agn.reply_tx_stats.bt_retry++;
|
||||
priv->reply_tx_stats.bt_retry++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_STA_INVALID:
|
||||
priv->_agn.reply_tx_stats.sta_invalid++;
|
||||
priv->reply_tx_stats.sta_invalid++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FRAG_DROPPED:
|
||||
priv->_agn.reply_tx_stats.frag_drop++;
|
||||
priv->reply_tx_stats.frag_drop++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_TID_DISABLE:
|
||||
priv->_agn.reply_tx_stats.tid_disable++;
|
||||
priv->reply_tx_stats.tid_disable++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_FIFO_FLUSHED:
|
||||
priv->_agn.reply_tx_stats.fifo_flush++;
|
||||
priv->reply_tx_stats.fifo_flush++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
|
||||
priv->_agn.reply_tx_stats.insuff_cf_poll++;
|
||||
priv->reply_tx_stats.insuff_cf_poll++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_PASSIVE_NO_RX:
|
||||
priv->_agn.reply_tx_stats.fail_hw_drop++;
|
||||
priv->reply_tx_stats.fail_hw_drop++;
|
||||
break;
|
||||
case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
|
||||
priv->_agn.reply_tx_stats.sta_color_mismatch++;
|
||||
priv->reply_tx_stats.sta_color_mismatch++;
|
||||
break;
|
||||
default:
|
||||
priv->_agn.reply_tx_stats.unknown++;
|
||||
priv->reply_tx_stats.unknown++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -130,43 +130,43 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
|
|||
|
||||
switch (status) {
|
||||
case AGG_TX_STATE_UNDERRUN_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.underrun++;
|
||||
priv->reply_agg_tx_stats.underrun++;
|
||||
break;
|
||||
case AGG_TX_STATE_BT_PRIO_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.bt_prio++;
|
||||
priv->reply_agg_tx_stats.bt_prio++;
|
||||
break;
|
||||
case AGG_TX_STATE_FEW_BYTES_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.few_bytes++;
|
||||
priv->reply_agg_tx_stats.few_bytes++;
|
||||
break;
|
||||
case AGG_TX_STATE_ABORT_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.abort++;
|
||||
priv->reply_agg_tx_stats.abort++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_TTL_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
|
||||
priv->reply_agg_tx_stats.last_sent_ttl++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.last_sent_try++;
|
||||
priv->reply_agg_tx_stats.last_sent_try++;
|
||||
break;
|
||||
case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
|
||||
priv->reply_agg_tx_stats.last_sent_bt_kill++;
|
||||
break;
|
||||
case AGG_TX_STATE_SCD_QUERY_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.scd_query++;
|
||||
priv->reply_agg_tx_stats.scd_query++;
|
||||
break;
|
||||
case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.bad_crc32++;
|
||||
priv->reply_agg_tx_stats.bad_crc32++;
|
||||
break;
|
||||
case AGG_TX_STATE_RESPONSE_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.response++;
|
||||
priv->reply_agg_tx_stats.response++;
|
||||
break;
|
||||
case AGG_TX_STATE_DUMP_TX_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.dump_tx++;
|
||||
priv->reply_agg_tx_stats.dump_tx++;
|
||||
break;
|
||||
case AGG_TX_STATE_DELAY_TX_MSK:
|
||||
priv->_agn.reply_agg_tx_stats.delay_tx++;
|
||||
priv->reply_agg_tx_stats.delay_tx++;
|
||||
break;
|
||||
default:
|
||||
priv->_agn.reply_agg_tx_stats.unknown++;
|
||||
priv->reply_agg_tx_stats.unknown++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -391,8 +391,7 @@ void iwl_check_abort_status(struct iwl_priv *priv,
|
|||
}
|
||||
}
|
||||
|
||||
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
|
||||
struct iwl_rx_mem_buffer *rxb)
|
||||
void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
|
||||
{
|
||||
struct iwl_rx_packet *pkt = rxb_addr(rxb);
|
||||
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
|
||||
|
@ -401,6 +400,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
|
|||
struct iwl_tx_queue *txq = &priv->txq[txq_id];
|
||||
struct ieee80211_tx_info *info;
|
||||
struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
|
||||
struct ieee80211_hdr *hdr;
|
||||
struct iwl_tx_info *txb;
|
||||
u32 status = le16_to_cpu(tx_resp->status.status);
|
||||
int tid;
|
||||
|
@ -427,6 +427,11 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
|
|||
IWLAGN_TX_RES_RA_POS;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
|
||||
hdr = (void *)txb->skb->data;
|
||||
if (!ieee80211_is_data_qos(hdr->frame_control))
|
||||
priv->last_seq_ctl = tx_resp->seq_ctl;
|
||||
|
||||
if (txq->sched_retry) {
|
||||
const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
|
||||
struct iwl_ht_agg *agg;
|
||||
|
@ -479,27 +484,6 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
|
|||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
}
|
||||
|
||||
void iwlagn_rx_handler_setup(struct iwl_priv *priv)
|
||||
{
|
||||
/* init calibration handlers */
|
||||
priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
|
||||
iwlagn_rx_calib_result;
|
||||
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
|
||||
|
||||
/* set up notification wait support */
|
||||
spin_lock_init(&priv->_agn.notif_wait_lock);
|
||||
INIT_LIST_HEAD(&priv->_agn.notif_waits);
|
||||
init_waitqueue_head(&priv->_agn.notif_waitq);
|
||||
}
|
||||
|
||||
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
|
||||
{
|
||||
/*
|
||||
* nothing needs to be done here anymore
|
||||
* still keep for future use if needed
|
||||
*/
|
||||
}
|
||||
|
||||
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
|
||||
{
|
||||
return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
|
||||
|
@ -541,7 +525,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
|
|||
else
|
||||
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
|
||||
|
||||
return trans_send_cmd_pdu(priv, tx_ant_cfg_cmd, CMD_SYNC,
|
||||
return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
|
||||
sizeof(tx_power_cmd), &tx_power_cmd);
|
||||
}
|
||||
|
||||
|
@ -628,283 +612,6 @@ struct iwl_mod_params iwlagn_mod_params = {
|
|||
/* the rest are 0 by default */
|
||||
};
|
||||
|
||||
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
|
||||
{
|
||||
u32 rb_size;
|
||||
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
|
||||
u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
|
||||
|
||||
rb_timeout = RX_RB_TIMEOUT;
|
||||
|
||||
if (iwlagn_mod_params.amsdu_size_8K)
|
||||
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
|
||||
else
|
||||
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
|
||||
|
||||
/* Stop Rx DMA */
|
||||
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
|
||||
|
||||
/* Reset driver's Rx queue write index */
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
|
||||
|
||||
/* Tell device where to find RBD circular buffer in DRAM */
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
|
||||
(u32)(rxq->bd_dma >> 8));
|
||||
|
||||
/* Tell device where in DRAM to update its Rx status */
|
||||
iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
|
||||
rxq->rb_stts_dma >> 4);
|
||||
|
||||
/* Enable Rx DMA
|
||||
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
|
||||
* the credit mechanism in 5000 HW RX FIFO
|
||||
* Direct rx interrupts to hosts
|
||||
* Rx buffer size 4 or 8k
|
||||
* RB timeout 0x10
|
||||
* 256 RBDs
|
||||
*/
|
||||
iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
|
||||
FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
|
||||
FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
|
||||
FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
|
||||
FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
|
||||
rb_size|
|
||||
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
|
||||
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
|
||||
|
||||
/* Set interrupt coalescing timer to default (2048 usecs) */
|
||||
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
|
||||
{
|
||||
/*
|
||||
* (for documentation purposes)
|
||||
* to set power to V_AUX, do:
|
||||
|
||||
if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
|
||||
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
|
||||
~APMG_PS_CTRL_MSK_PWR_SRC);
|
||||
*/
|
||||
|
||||
iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
|
||||
APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
|
||||
~APMG_PS_CTRL_MSK_PWR_SRC);
|
||||
}
|
||||
|
||||
int iwlagn_hw_nic_init(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
|
||||
/* nic_init */
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwl_apm_init(priv);
|
||||
|
||||
/* Set interrupt coalescing calibration timer to default (512 usecs) */
|
||||
iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
iwlagn_set_pwr_vmain(priv);
|
||||
|
||||
priv->cfg->ops->lib->nic_config(priv);
|
||||
|
||||
/* Allocate the RX queue, or reset if it is already allocated */
|
||||
trans_rx_init(priv);
|
||||
|
||||
iwlagn_rx_replenish(priv);
|
||||
|
||||
iwlagn_rx_init(priv, rxq);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
rxq->need_update = 1;
|
||||
iwl_rx_queue_update_write_ptr(priv, rxq);
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
/* Allocate or reset and init all Tx and Command queues */
|
||||
if (trans_tx_init(priv))
|
||||
return -ENOMEM;
|
||||
|
||||
if (priv->cfg->base_params->shadow_reg_enable) {
|
||||
/* enable shadow regs in HW */
|
||||
iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
|
||||
0x800FFFFF);
|
||||
}
|
||||
|
||||
set_bit(STATUS_INIT, &priv->status);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
|
||||
*/
|
||||
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
|
||||
dma_addr_t dma_addr)
|
||||
{
|
||||
return cpu_to_le32((u32)(dma_addr >> 8));
|
||||
}
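/*
 * Illustrative sketch (not part of the driver): the RBD pointer format
 * used by iwlagn_dma_addr2rbd_ptr() above - the receive-buffer DMA
 * address is stored shifted right by 8, which is why the buffer must be
 * 256-byte aligned and why addresses are limited to 36 bits (28 stored
 * bits plus 8 implied zero bits).
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t dma_addr2rbd_ptr(uint64_t dma_addr)
{
	return (uint32_t)(dma_addr >> 8);
}

int main(void)
{
	uint64_t dma = 0xABCDEF00ULL;	/* within 36 bits, 256-byte aligned */

	printf("rbd ptr %#x\n", (unsigned int)dma_addr2rbd_ptr(dma)); /* 0xabcdef */
	return 0;
}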
|
||||
|
||||
/**
|
||||
* iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
|
||||
*
|
||||
* If there are slots in the RX queue that need to be restocked,
|
||||
* and we have free pre-allocated buffers, fill the ranks as much
|
||||
* as we can, pulling from rx_free.
|
||||
*
|
||||
* This moves the 'write' index forward to catch up with 'processed', and
|
||||
* also updates the memory address in the firmware to reference the new
|
||||
* target buffer.
|
||||
*/
|
||||
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
struct list_head *element;
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
|
||||
/* The overwritten rxb must be a used one */
|
||||
rxb = rxq->queue[rxq->write];
|
||||
BUG_ON(rxb && rxb->page);
|
||||
|
||||
/* Get next free Rx buffer, remove from free list */
|
||||
element = rxq->rx_free.next;
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
list_del(element);
|
||||
|
||||
/* Point to Rx buffer via next RBD in circular buffer */
|
||||
rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
|
||||
rxb->page_dma);
|
||||
rxq->queue[rxq->write] = rxb;
|
||||
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
|
||||
rxq->free_count--;
|
||||
}
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
/* If the pre-allocated buffer pool is dropping low, schedule to
|
||||
* refill it */
|
||||
if (rxq->free_count <= RX_LOW_WATERMARK)
|
||||
queue_work(priv->workqueue, &priv->rx_replenish);
|
||||
|
||||
|
||||
/* If we've added more space for the firmware to place data, tell it.
|
||||
* Increment device's write pointer in multiples of 8. */
|
||||
if (rxq->write_actual != (rxq->write & ~0x7)) {
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
rxq->need_update = 1;
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
iwl_rx_queue_update_write_ptr(priv, rxq);
|
||||
}
|
||||
}
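/*
 * Illustrative sketch (not part of the driver): the "update the device's
 * write pointer only in multiples of 8" check used at the end of the
 * restock function above.  write_actual tracks the last value handed to
 * the firmware; the device is only poked again once the driver-side
 * write index has crossed the next 8-entry boundary.
 */
#include <stdio.h>

int main(void)
{
	unsigned int write_actual = 0;
	unsigned int write;

	for (write = 0; write <= 20; write++) {
		if (write_actual != (write & ~0x7u)) {
			write_actual = write & ~0x7u;
			printf("write=%u -> tell device %u\n", write, write_actual);
		}
	}
	return 0;	/* prints at write=8 and write=16 only */
}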
|
||||
|
||||
/**
|
||||
* iwlagn_rx_replenish - Move all used packets from rx_used to rx_free
|
||||
*
|
||||
* When moving to rx_free an SKB is allocated for the slot.
|
||||
*
|
||||
* Also restock the Rx queue via iwl_rx_queue_restock.
|
||||
* This is called as a scheduled work item (except during initialization)
|
||||
*/
|
||||
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
|
||||
{
|
||||
struct iwl_rx_queue *rxq = &priv->rxq;
|
||||
struct list_head *element;
|
||||
struct iwl_rx_mem_buffer *rxb;
|
||||
struct page *page;
|
||||
unsigned long flags;
|
||||
gfp_t gfp_mask = priority;
|
||||
|
||||
while (1) {
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
return;
|
||||
}
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
if (rxq->free_count > RX_LOW_WATERMARK)
|
||||
gfp_mask |= __GFP_NOWARN;
|
||||
|
||||
if (priv->hw_params.rx_page_order > 0)
|
||||
gfp_mask |= __GFP_COMP;
|
||||
|
||||
/* Alloc a new receive buffer */
|
||||
page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
|
||||
if (!page) {
|
||||
if (net_ratelimit())
|
||||
IWL_DEBUG_INFO(priv, "alloc_pages failed, "
|
||||
"order: %d\n",
|
||||
priv->hw_params.rx_page_order);
|
||||
|
||||
if ((rxq->free_count <= RX_LOW_WATERMARK) &&
|
||||
net_ratelimit())
|
||||
IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
|
||||
priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
|
||||
rxq->free_count);
|
||||
/* We don't reschedule replenish work here -- we will
|
||||
* call the restock method and if it still needs
|
||||
* more buffers it will schedule replenish */
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
|
||||
if (list_empty(&rxq->rx_used)) {
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
__free_pages(page, priv->hw_params.rx_page_order);
|
||||
return;
|
||||
}
|
||||
element = rxq->rx_used.next;
|
||||
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
|
||||
list_del(element);
|
||||
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
|
||||
BUG_ON(rxb->page);
|
||||
rxb->page = page;
|
||||
/* Get physical address of the RB */
|
||||
rxb->page_dma = dma_map_page(priv->bus.dev, page, 0,
|
||||
PAGE_SIZE << priv->hw_params.rx_page_order,
|
||||
DMA_FROM_DEVICE);
|
||||
/* dma address must be no more than 36 bits */
|
||||
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
|
||||
/* and also 256 byte aligned! */
|
||||
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
|
||||
|
||||
spin_lock_irqsave(&rxq->lock, flags);
|
||||
|
||||
list_add_tail(&rxb->list, &rxq->rx_free);
|
||||
rxq->free_count++;
|
||||
|
||||
spin_unlock_irqrestore(&rxq->lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
void iwlagn_rx_replenish(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
iwlagn_rx_allocate(priv, GFP_KERNEL);
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwlagn_rx_queue_restock(priv);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
void iwlagn_rx_replenish_now(struct iwl_priv *priv)
|
||||
{
|
||||
iwlagn_rx_allocate(priv, GFP_ATOMIC);
|
||||
|
||||
iwlagn_rx_queue_restock(priv);
|
||||
}
|
||||
|
||||
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
|
||||
{
|
||||
int idx = 0;
|
||||
|
@ -1048,7 +755,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
|
|||
|
||||
static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
|
||||
{
|
||||
struct sk_buff *skb = priv->_agn.offchan_tx_skb;
|
||||
struct sk_buff *skb = priv->offchan_tx_skb;
|
||||
|
||||
if (skb->len < maxlen)
|
||||
maxlen = skb->len;
|
||||
|
@ -1134,7 +841,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
} else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
|
||||
scan->suspend_time = 0;
|
||||
scan->max_out_time =
|
||||
cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
|
||||
cpu_to_le32(1024 * priv->offchan_tx_timeout);
|
||||
}
|
||||
|
||||
switch (priv->scan_type) {
|
||||
|
@ -1322,9 +1029,9 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
scan_ch = (void *)&scan->data[cmd_len];
|
||||
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
|
||||
scan_ch->channel =
|
||||
cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
|
||||
cpu_to_le16(priv->offchan_tx_chan->hw_value);
|
||||
scan_ch->active_dwell =
|
||||
cpu_to_le16(priv->_agn.offchan_tx_timeout);
|
||||
cpu_to_le16(priv->offchan_tx_timeout);
|
||||
scan_ch->passive_dwell = 0;
|
||||
|
||||
/* Set txpower levels to defaults */
|
||||
|
@ -1334,7 +1041,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
* power level:
|
||||
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
|
||||
*/
|
||||
if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
|
||||
if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
|
||||
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
|
||||
else
|
||||
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
|
||||
|
@ -1360,7 +1067,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = trans_send_cmd(priv, &cmd);
|
||||
ret = trans_send_cmd(&priv->trans, &cmd);
|
||||
if (ret) {
|
||||
clear_bit(STATUS_SCAN_HW, &priv->status);
|
||||
iwlagn_set_pan_params(priv);
|
||||
|
@ -1466,7 +1173,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
|
|||
flush_cmd.fifo_control);
|
||||
flush_cmd.flush_control = cpu_to_le16(flush_control);
|
||||
|
||||
return trans_send_cmd(priv, &cmd);
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
}
|
||||
|
||||
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
|
||||
|
@ -1660,12 +1367,12 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
|
|||
if (priv->cfg->bt_params->bt_session_2) {
|
||||
memcpy(&bt_cmd_2000.basic, &basic,
|
||||
sizeof(basic));
|
||||
ret = trans_send_cmd_pdu(priv, REPLY_BT_CONFIG,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
|
||||
CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
|
||||
} else {
|
||||
memcpy(&bt_cmd_6000.basic, &basic,
|
||||
sizeof(basic));
|
||||
ret = trans_send_cmd_pdu(priv, REPLY_BT_CONFIG,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
|
||||
CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
|
||||
}
|
||||
if (ret)
|
||||
|
@ -1986,15 +1693,12 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
|
|||
|
||||
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
|
||||
{
|
||||
iwlagn_rx_handler_setup(priv);
|
||||
priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
|
||||
iwlagn_bt_coex_profile_notif;
|
||||
}
|
||||
|
||||
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
|
||||
{
|
||||
iwlagn_setup_deferred_work(priv);
|
||||
|
||||
INIT_WORK(&priv->bt_traffic_change_work,
|
||||
iwlagn_bt_traffic_change_work);
|
||||
}
|
||||
|
@ -2306,9 +2010,9 @@ void iwlagn_init_notification_wait(struct iwl_priv *priv,
|
|||
wait_entry->triggered = false;
|
||||
wait_entry->aborted = false;
|
||||
|
||||
spin_lock_bh(&priv->_agn.notif_wait_lock);
|
||||
list_add(&wait_entry->list, &priv->_agn.notif_waits);
|
||||
spin_unlock_bh(&priv->_agn.notif_wait_lock);
|
||||
spin_lock_bh(&priv->notif_wait_lock);
|
||||
list_add(&wait_entry->list, &priv->notif_waits);
|
||||
spin_unlock_bh(&priv->notif_wait_lock);
|
||||
}
|
||||
|
||||
int iwlagn_wait_notification(struct iwl_priv *priv,
|
||||
|
@ -2317,13 +2021,13 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
|
|||
{
|
||||
int ret;
|
||||
|
||||
ret = wait_event_timeout(priv->_agn.notif_waitq,
|
||||
ret = wait_event_timeout(priv->notif_waitq,
|
||||
wait_entry->triggered || wait_entry->aborted,
|
||||
timeout);
|
||||
|
||||
spin_lock_bh(&priv->_agn.notif_wait_lock);
|
||||
spin_lock_bh(&priv->notif_wait_lock);
|
||||
list_del(&wait_entry->list);
|
||||
spin_unlock_bh(&priv->_agn.notif_wait_lock);
|
||||
spin_unlock_bh(&priv->notif_wait_lock);
|
||||
|
||||
if (wait_entry->aborted)
|
||||
return -EIO;
|
||||
|
@ -2337,93 +2041,7 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
|
|||
void iwlagn_remove_notification(struct iwl_priv *priv,
|
||||
struct iwl_notification_wait *wait_entry)
|
||||
{
|
||||
spin_lock_bh(&priv->_agn.notif_wait_lock);
|
||||
spin_lock_bh(&priv->notif_wait_lock);
|
||||
list_del(&wait_entry->list);
|
||||
spin_unlock_bh(&priv->_agn.notif_wait_lock);
|
||||
}
|
||||
|
||||
int iwlagn_start_device(struct iwl_priv *priv)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if ((priv->cfg->sku & EEPROM_SKU_CAP_AMT_ENABLE) &&
|
||||
iwl_prepare_card_hw(priv)) {
|
||||
IWL_WARN(priv, "Exit HW not ready\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* If platform's RF_KILL switch is NOT set to KILL */
|
||||
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
|
||||
clear_bit(STATUS_RF_KILL_HW, &priv->status);
|
||||
else
|
||||
set_bit(STATUS_RF_KILL_HW, &priv->status);
|
||||
|
||||
if (iwl_is_rfkill(priv)) {
|
||||
wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
|
||||
iwl_enable_interrupts(priv);
|
||||
return -ERFKILL;
|
||||
}
|
||||
|
||||
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
|
||||
|
||||
ret = iwlagn_hw_nic_init(priv);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Unable to init nic\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* make sure rfkill handshake bits are cleared */
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
|
||||
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
|
||||
|
||||
/* clear (again), then enable host interrupts */
|
||||
iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
|
||||
iwl_enable_interrupts(priv);
|
||||
|
||||
/* really make sure rfkill handshake bits are cleared */
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
|
||||
iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void iwlagn_stop_device(struct iwl_priv *priv)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* stop and reset the on-board processor */
|
||||
iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
|
||||
|
||||
/* tell the device to stop sending interrupts */
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
iwl_disable_interrupts(priv);
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
iwl_synchronize_irq(priv);
|
||||
|
||||
/* device going down, Stop using ICT table */
|
||||
iwl_disable_ict(priv);
|
||||
|
||||
/*
|
||||
* If a HW restart happens during firmware loading,
|
||||
* then the firmware loading might call this function
|
||||
* and later it might be called again due to the
|
||||
* restart. So don't process again if the device is
|
||||
* already dead.
|
||||
*/
|
||||
if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
|
||||
trans_tx_stop(priv);
|
||||
trans_rx_stop(priv);
|
||||
|
||||
/* Power-down device's busmaster DMA clocks */
|
||||
iwl_write_prph(priv, APMG_CLK_DIS_REG,
|
||||
APMG_CLK_VAL_DMA_CLK_RQT);
|
||||
udelay(5);
|
||||
}
|
||||
|
||||
/* Make sure (redundant) we've released our request to stay awake */
|
||||
iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
|
||||
|
||||
/* Stop the device, and put it in low power state */
|
||||
iwl_apm_stop(priv);
|
||||
spin_unlock_bh(&priv->notif_wait_lock);
|
||||
}
|
||||
|
|
|
@ -354,9 +354,11 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
|
|||
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
/* testmode has higher priority to overwrite the fixed rate */
|
||||
if (priv->tm_fixed_rate)
|
||||
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
|
||||
#endif
|
||||
|
||||
IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
|
||||
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
|
||||
|
@ -1080,7 +1082,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
|
|||
/* See if there's a better rate or modulation mode to try. */
|
||||
if (sta && sta->supp_rates[sband->band])
|
||||
rs_rate_scale_perform(priv, skb, sta, lq_sta);
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
|
||||
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
|
||||
if ((priv->tm_fixed_rate) &&
|
||||
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
|
||||
rs_program_fix_rate(priv, lq_sta);
|
||||
|
@ -2904,8 +2907,9 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
|
|||
if (sband->band == IEEE80211_BAND_5GHZ)
|
||||
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
|
||||
lq_sta->is_agg = 0;
|
||||
|
||||
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
|
||||
priv->tm_fixed_rate = 0;
|
||||
#endif
|
||||
#ifdef CONFIG_MAC80211_DEBUGFS
|
||||
lq_sta->dbg_fixed_rate = 0;
|
||||
#endif
|
||||
|
|
|
@ -40,7 +40,7 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
|
|||
int ret;
|
||||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_cmd,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
|
||||
CMD_SYNC, sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
|
@ -66,7 +66,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
|
|||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
send->dev_type = RXON_DEV_TYPE_P2P;
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_cmd,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd,
|
||||
CMD_SYNC, sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
|
@ -92,7 +92,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
|
|||
int ret;
|
||||
|
||||
send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
|
||||
sizeof(*send), send);
|
||||
|
||||
send->filter_flags = old_filter;
|
||||
|
@ -121,7 +121,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
|
|||
ctx->qos_data.qos_active,
|
||||
ctx->qos_data.def_qos_parm.qos_flags);
|
||||
|
||||
ret = trans_send_cmd_pdu(priv, ctx->qos_cmd, CMD_SYNC,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->qos_cmd, CMD_SYNC,
|
||||
sizeof(struct iwl_qosparam_cmd),
|
||||
&ctx->qos_data.def_qos_parm);
|
||||
if (ret)
|
||||
|
@ -180,7 +180,7 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
|
|||
ctx->staging.ofdm_ht_triple_stream_basic_rates;
|
||||
rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
|
||||
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_assoc_cmd,
|
||||
CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
|
||||
return ret;
|
||||
}
|
||||
|
@ -266,7 +266,7 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
|
|||
* Associated RXON doesn't clear the station table in uCode,
|
||||
* so we don't need to restore stations etc. after this.
|
||||
*/
|
||||
ret = trans_send_cmd_pdu(priv, ctx->rxon_cmd, CMD_SYNC,
|
||||
ret = trans_send_cmd_pdu(&priv->trans, ctx->rxon_cmd, CMD_SYNC,
|
||||
sizeof(struct iwl_rxon_cmd), &ctx->staging);
|
||||
if (ret) {
|
||||
IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
|
||||
|
@ -303,6 +303,98 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int iwlagn_set_pan_params(struct iwl_priv *priv)
|
||||
{
|
||||
struct iwl_wipan_params_cmd cmd;
|
||||
struct iwl_rxon_context *ctx_bss, *ctx_pan;
|
||||
int slot0 = 300, slot1 = 0;
|
||||
int ret;
|
||||
|
||||
if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
|
||||
return 0;
|
||||
|
||||
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
|
||||
ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
|
||||
ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
|
||||
|
||||
/*
|
||||
* If the PAN context is inactive, then we don't need
|
||||
* to update the PAN parameters, the last thing we'll
|
||||
* have done before it goes inactive is making the PAN
|
||||
* parameters be WLAN-only.
|
||||
*/
|
||||
if (!ctx_pan->is_active)
|
||||
return 0;
|
||||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
|
||||
/* only 2 slots are currently allowed */
|
||||
cmd.num_slots = 2;
|
||||
|
||||
cmd.slots[0].type = 0; /* BSS */
|
||||
cmd.slots[1].type = 1; /* PAN */
|
||||
|
||||
if (priv->hw_roc_channel) {
|
||||
/* both contexts must be used for this to happen */
|
||||
slot1 = priv->hw_roc_duration;
|
||||
slot0 = IWL_MIN_SLOT_TIME;
|
||||
} else if (ctx_bss->vif && ctx_pan->vif) {
|
||||
int bcnint = ctx_pan->beacon_int;
|
||||
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
|
||||
|
||||
/* should be set, but seems unused?? */
|
||||
cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
|
||||
|
||||
if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
|
||||
bcnint &&
|
||||
bcnint != ctx_bss->beacon_int) {
|
||||
IWL_ERR(priv,
|
||||
"beacon intervals don't match (%d, %d)\n",
|
||||
ctx_bss->beacon_int, ctx_pan->beacon_int);
|
||||
} else
|
||||
bcnint = max_t(int, bcnint,
|
||||
ctx_bss->beacon_int);
|
||||
if (!bcnint)
|
||||
bcnint = DEFAULT_BEACON_INTERVAL;
|
||||
slot0 = bcnint / 2;
|
||||
slot1 = bcnint - slot0;
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status) ||
|
||||
(!ctx_bss->vif->bss_conf.idle &&
|
||||
!ctx_bss->vif->bss_conf.assoc)) {
|
||||
slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
} else if (!ctx_pan->vif->bss_conf.idle &&
|
||||
!ctx_pan->vif->bss_conf.assoc) {
|
||||
slot1 = bcnint * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot0 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
} else if (ctx_pan->vif) {
|
||||
slot0 = 0;
|
||||
slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
|
||||
ctx_pan->beacon_int;
|
||||
slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
|
||||
|
||||
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
|
||||
slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
|
||||
slot1 = IWL_MIN_SLOT_TIME;
|
||||
}
|
||||
}
|
||||
|
||||
cmd.slots[0].width = cpu_to_le16(slot0);
|
||||
cmd.slots[1].width = cpu_to_le16(slot1);
|
||||
|
||||
ret = trans_send_cmd_pdu(&priv->trans, REPLY_WIPAN_PARAMS, CMD_SYNC,
|
||||
sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
|
||||
|
||||
return ret;
|
||||
}
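/*
 * Illustrative sketch (not part of the driver): the default BSS/PAN time
 * slicing computed above when both contexts have an interface and no
 * scan or association work is pending - the PAN beacon interval is
 * simply split in half between the two slots.  DEFAULT_BEACON_INTERVAL
 * here is an assumed stand-in for the driver's constant.
 */
#include <stdio.h>

#define DEFAULT_BEACON_INTERVAL	200	/* assumed value, in TU */

static void split_slots(int bcnint, int *slot0, int *slot1)
{
	if (!bcnint)
		bcnint = DEFAULT_BEACON_INTERVAL;
	*slot0 = bcnint / 2;
	*slot1 = bcnint - *slot0;
}

int main(void)
{
	int s0, s1;

	split_slots(100, &s0, &s1);		/* typical beacon interval */
	printf("slot0=%d slot1=%d\n", s0, s1);	/* 50 / 50 */

	split_slots(0, &s0, &s1);		/* falls back to the default */
	printf("slot0=%d slot1=%d\n", s0, s1);	/* 100 / 100 */
	return 0;
}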
|
||||
|
||||
/**
|
||||
* iwlagn_commit_rxon - commit staging_rxon to hardware
|
||||
*
|
||||
|
@ -345,8 +437,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
|
|||
/* always get timestamp with Rx frame */
|
||||
ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
|
||||
|
||||
if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->_agn.hw_roc_channel) {
|
||||
struct ieee80211_channel *chan = priv->_agn.hw_roc_channel;
|
||||
if (ctx->ctxid == IWL_RXON_CTX_PAN && priv->hw_roc_channel) {
|
||||
struct ieee80211_channel *chan = priv->hw_roc_channel;
|
||||
|
||||
iwl_set_rxon_channel(priv, chan, ctx);
|
||||
iwl_set_flags_for_band(priv, ctx, chan->band, NULL);
|
||||
|
@ -694,8 +786,8 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
|
|||
|
||||
memset(&cmd, 0, sizeof(cmd));
|
||||
iwl_set_calib_hdr(&cmd.hdr,
|
||||
priv->_agn.phy_calib_chain_noise_reset_cmd);
|
||||
ret = trans_send_cmd_pdu(priv,
|
||||
priv->phy_calib_chain_noise_reset_cmd);
|
||||
ret = trans_send_cmd_pdu(&priv->trans,
|
||||
REPLY_PHY_CALIBRATION_CMD,
|
||||
CMD_SYNC, sizeof(cmd), &cmd);
|
||||
if (ret)
|
||||
|
@ -762,6 +854,9 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
|
|||
iwl_wake_any_queue(priv, ctx);
|
||||
}
|
||||
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
|
||||
|
||||
if (ctx->ctxid == IWL_RXON_CTX_BSS)
|
||||
priv->have_rekey_data = false;
|
||||
}
|
||||
|
||||
iwlagn_bt_coex_rssi_monitor(priv);
|
||||
|
|
|
@ -139,6 +139,14 @@ int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* static WEP keys
|
||||
*
|
||||
* For each context, the device has a table of 4 static WEP keys
|
||||
* (one for each key index) that is updated with the following
|
||||
* commands.
|
||||
*/
|
||||
|
||||
static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
bool send_if_empty)
|
||||
|
@ -181,7 +189,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
|
|||
cmd.len[0] = cmd_size;
|
||||
|
||||
if (not_empty || send_if_empty)
|
||||
return trans_send_cmd(priv, &cmd);
|
||||
return trans_send_cmd(&priv->trans, &cmd);
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
@ -232,9 +240,7 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
keyconf->hw_key_idx = HW_KEY_DEFAULT;
|
||||
priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
|
||||
keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT;
|
||||
|
||||
ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
|
||||
memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
|
||||
|
@ -247,166 +253,117 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
|
||||
struct iwl_rxon_context *ctx,
|
||||
/*
|
||||
* dynamic (per-station) keys
|
||||
*
|
||||
* The dynamic keys are a little more complicated. The device has
|
||||
* a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys.
|
||||
* These are linked to stations by a table that contains an index
|
||||
* into the key table for each station/key index/{mcast,unicast},
|
||||
* i.e. it's basically an array of pointers like this:
|
||||
* key_offset_t key_mapping[NUM_STATIONS][4][2];
|
||||
* (it really works differently, but you can think of it as such)
|
||||
*
|
||||
* The key uploading and linking happens in the same command, the
|
||||
* add station command with STA_MODIFY_KEY_MASK.
|
||||
*/
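/*
 * Illustrative sketch (not part of the driver): the conceptual
 * station / key-index / {mcast,unicast} -> key-cache-offset mapping the
 * comment above describes.  All sizes and names here are hypothetical;
 * as the comment notes, the real device encodes this differently.
 */
#include <stdio.h>

#define NUM_STATIONS	16
#define NUM_KEY_IDX	4
#define KEY_OFFSET_FREE	0xff

/* [station][key index][0 = mcast, 1 = ucast] -> offset into the key cache */
static unsigned char key_mapping[NUM_STATIONS][NUM_KEY_IDX][2];

static void link_key(int sta_id, int keyidx, int unicast, unsigned char offset)
{
	key_mapping[sta_id][keyidx][unicast] = offset;
}

int main(void)
{
	int s, k, u;

	for (s = 0; s < NUM_STATIONS; s++)
		for (k = 0; k < NUM_KEY_IDX; k++)
			for (u = 0; u < 2; u++)
				key_mapping[s][k][u] = KEY_OFFSET_FREE;

	/* GTK on a station interface: installed against the AP's station entry */
	link_key(2 /* ap_sta_id */, 1, 0 /* mcast */, 5 /* cache slot */);

	printf("sta 2, keyidx 1, mcast -> offset %u\n", key_mapping[2][1][0]);
	return 0;
}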
|
||||
|
||||
static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
|
||||
struct ieee80211_vif *vif,
|
||||
struct ieee80211_sta *sta)
|
||||
{
|
||||
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
|
||||
u8 sta_id = IWL_INVALID_STATION;
|
||||
|
||||
if (sta)
|
||||
sta_id = iwl_sta_id(sta);
|
||||
|
||||
/*
|
||||
* The device expects GTKs for station interfaces to be
|
||||
* installed as GTKs for the AP station. If we have no
|
||||
* station ID, then use the ap_sta_id in that case.
|
||||
*/
|
||||
if (!sta && vif && vif_priv->ctx) {
|
||||
switch (vif->type) {
|
||||
case NL80211_IFTYPE_STATION:
|
||||
sta_id = vif_priv->ctx->ap_sta_id;
|
||||
break;
|
||||
default:
|
||||
/*
|
||||
* In all other cases, the key will be
|
||||
* used either for TX only or is bound
|
||||
* to a station already.
|
||||
*/
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return sta_id;
|
||||
}
|
||||
|
||||
static int iwlagn_send_sta_key(struct iwl_priv *priv,
|
||||
struct ieee80211_key_conf *keyconf,
|
||||
u8 sta_id)
|
||||
u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
|
||||
u32 cmd_flags)
|
||||
{
|
||||
unsigned long flags;
|
||||
__le16 key_flags = 0;
|
||||
__le16 key_flags;
|
||||
struct iwl_addsta_cmd sta_cmd;
|
||||
int i;
|
||||
|
||||
lockdep_assert_held(&priv->mutex);
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
|
||||
keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
|
||||
key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
|
||||
key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
|
||||
|
||||
key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
|
||||
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
|
||||
key_flags &= ~STA_KEY_FLG_INVALID;
|
||||
|
||||
if (keyconf->keylen == WEP_KEY_LEN_128)
|
||||
switch (keyconf->cipher) {
|
||||
case WLAN_CIPHER_SUITE_CCMP:
|
||||
key_flags |= STA_KEY_FLG_CCMP;
|
||||
memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_TKIP:
|
||||
key_flags |= STA_KEY_FLG_TKIP;
|
||||
sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
|
||||
for (i = 0; i < 5; i++)
|
||||
sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
|
||||
memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
|
||||
break;
|
||||
case WLAN_CIPHER_SUITE_WEP104:
|
||||
key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
|
||||
/* fall through */
|
||||
case WLAN_CIPHER_SUITE_WEP40:
|
||||
key_flags |= STA_KEY_FLG_WEP;
|
||||
memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sta_id == ctx->bcast_sta_id)
|
||||
if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
|
||||
key_flags |= STA_KEY_MULTICAST_MSK;
|
||||
|
||||
spin_lock_irqsave(&priv->sta_lock, flags);
|
||||
/* key pointer (offset) */
|
||||
sta_cmd.key.key_offset = keyconf->hw_key_idx;
|
||||
|
||||
priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
|
||||
priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
|
||||
priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
|
||||
sta_cmd.key.key_flags = key_flags;
|
||||
sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
|
||||
sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
|
||||
|
||||
memcpy(priv->stations[sta_id].keyinfo.key,
|
||||
keyconf->key, keyconf->keylen);
|
||||
|
||||
memcpy(&priv->stations[sta_id].sta.key.key[3],
|
||||
keyconf->key, keyconf->keylen);
|
||||
|
||||
if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
|
||||
== STA_KEY_FLG_NO_ENC)
|
||||
priv->stations[sta_id].sta.key.key_offset =
|
||||
iwl_get_free_ucode_key_index(priv);
|
||||
/* else, we are overriding an existing key => no need to allocate room
|
||||
* in uCode. */
|
||||
|
||||
WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
|
||||
"no space for a new key");
|
||||
|
||||
priv->stations[sta_id].sta.key.key_flags = key_flags;
|
||||
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
|
||||
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
|
||||
|
||||
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
|
||||
spin_unlock_irqrestore(&priv->sta_lock, flags);
|
||||
|
||||
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
|
||||
}
|
||||
|
||||
static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	unsigned long flags;
	__le16 key_flags = 0;
	struct iwl_addsta_cmd sta_cmd;

	lockdep_assert_held(&priv->mutex);

	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	spin_lock_irqsave(&priv->sta_lock, flags);
	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;

	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
		keyconf->keylen);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
		keyconf->keylen);

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
			iwl_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
				struct iwl_rxon_context *ctx,
				struct ieee80211_key_conf *keyconf,
				u8 sta_id)
{
	unsigned long flags;
	int ret = 0;
	__le16 key_flags = 0;

	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
	key_flags &= ~STA_KEY_FLG_INVALID;

	if (sta_id == ctx->bcast_sta_id)
		key_flags |= STA_KEY_MULTICAST_MSK;

	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
	priv->stations[sta_id].keyinfo.keylen = 16;

	if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
			== STA_KEY_FLG_NO_ENC)
		priv->stations[sta_id].sta.key.key_offset =
			iwl_get_free_ucode_key_index(priv);
	/* else, we are overriding an existing key => no need to allocated room
	 * in uCode. */

	WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
		"no space for a new key");

	priv->stations[sta_id].sta.key.key_flags = key_flags;

	/* This copy is acutally not needed: we get the key with each TX */
	memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);

	memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	return ret;
	return iwl_send_add_sta(priv, &sta_cmd, cmd_flags);
}
void iwl_update_tkip_key(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			struct ieee80211_vif *vif,
			struct ieee80211_key_conf *keyconf,
			struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
{
	u8 sta_id;
	unsigned long flags;
	int i;
	u8 sta_id = iwlagn_key_sta_id(priv, vif, sta);

	if (sta_id == IWL_INVALID_STATION)
		return;

	if (iwl_scan_cancel(priv)) {
		/* cancel scan failed, just live w/ bad key and rely
@@ -414,121 +371,110 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
		return;
	}

	sta_id = iwl_sta_id_or_broadcast(priv, ctx, sta);
	if (sta_id == IWL_INVALID_STATION)
		return;

	spin_lock_irqsave(&priv->sta_lock, flags);

	priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;

	for (i = 0; i < 5; i++)
		priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
			cpu_to_le16(phase1key[i]);

	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);

	spin_unlock_irqrestore(&priv->sta_lock, flags);

	iwlagn_send_sta_key(priv, keyconf, sta_id,
			iv32, phase1key, CMD_ASYNC);
}
int iwl_remove_dynamic_key(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			struct ieee80211_key_conf *keyconf,
			u8 sta_id)
			struct ieee80211_sta *sta)
{
	unsigned long flags;
	u16 key_flags;
	u8 keyidx;
	struct iwl_addsta_cmd sta_cmd;
	u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);

	/* if station isn't there, neither is the key */
	if (sta_id == IWL_INVALID_STATION)
		return -ENOENT;

	spin_lock_irqsave(&priv->sta_lock, flags);
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
		sta_id = IWL_INVALID_STATION;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	if (sta_id == IWL_INVALID_STATION)
		return 0;

	lockdep_assert_held(&priv->mutex);

	ctx->key_mapping_keys--;

	spin_lock_irqsave(&priv->sta_lock, flags);
	key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;

	IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
		keyconf->keyidx, sta_id);

	if (keyconf->keyidx != keyidx) {
		/* We need to remove a key with index different that the one
		 * in the uCode. This means that the key we need to remove has
		 * been replaced by another one with different index.
		 * Don't do anything and return ok
		 */
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}
	if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table))
		IWL_ERR(priv, "offset %d not used in uCode key table.\n",
			keyconf->hw_key_idx);

	if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
		IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
			keyconf->keyidx, key_flags);
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}

	if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
		&priv->ucode_key_table))
		IWL_ERR(priv, "index %d not used in uCode key table.\n",
			priv->stations[sta_id].sta.key.key_offset);
	memset(&priv->stations[sta_id].keyinfo, 0,
		sizeof(struct iwl_hw_key));
	memset(&priv->stations[sta_id].sta.key, 0,
		sizeof(struct iwl_keyinfo));
	priv->stations[sta_id].sta.key.key_flags =
		STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
		spin_unlock_irqrestore(&priv->sta_lock, flags);
		return 0;
	}
	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
	spin_unlock_irqrestore(&priv->sta_lock, flags);
	sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
	sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
	sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
	sta_cmd.mode = STA_CONTROL_MODIFY_MSK;

	return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			struct ieee80211_key_conf *keyconf, u8 sta_id)
int iwl_set_dynamic_key(struct iwl_priv *priv,
			struct iwl_rxon_context *ctx,
			struct ieee80211_key_conf *keyconf,
			struct ieee80211_sta *sta)
{
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	int ret;
	u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
	const u8 *addr;

	if (sta_id == IWL_INVALID_STATION)
		return -EINVAL;

	lockdep_assert_held(&priv->mutex);

	keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
	if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
		return -ENOSPC;

	ctx->key_mapping_keys++;
	keyconf->hw_key_idx = HW_KEY_DYNAMIC;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		ret = iwl_set_ccmp_dynamic_key_info(priv, ctx, keyconf, sta_id);
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ret = iwl_set_tkip_dynamic_key_info(priv, ctx, keyconf, sta_id);
		keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

		if (sta)
			addr = sta->addr;
		else /* station mode case only */
			addr = ctx->active.bssid_addr;

		/* pre-fill phase 1 key into device cache */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
					seq.tkip.iv32, p1k, CMD_SYNC);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		ret = iwl_set_wep_dynamic_key_info(priv, ctx, keyconf, sta_id);
		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
					0, NULL, CMD_SYNC);
		break;
	default:
		IWL_ERR(priv,
			"Unknown alg: %s cipher = %x\n", __func__,
			keyconf->cipher);
		IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
		ret = -EINVAL;
	}

	IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
	if (ret) {
		ctx->key_mapping_keys--;
		clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table);
	}

	IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		sta_id, ret);
		sta ? sta->addr : NULL, ret);

	return ret;
}
@@ -39,6 +39,7 @@
#include "iwl-helpers.h"
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-trans.h"

/*
 * mac80211 queues, ACs, hardware queues, FIFOs.
@@ -95,132 +96,8 @@ static inline int get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
	return -EINVAL;
}
/**
 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
					struct iwl_tx_queue *txq,
					u16 byte_cnt)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int write_ptr = txq->q.write_ptr;
	int txq_id = txq->q.id;
	u8 sec_ctl = 0;
	u8 sta_id = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;

	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);

	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
	sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += WEP_IV_LEN + WEP_ICV_LEN;
		break;
	}

	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}

static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
					struct iwl_tx_queue *txq)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
	int txq_id = txq->q.id;
	int read_ptr = txq->q.read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != priv->cmd_queue)
		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));
	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
					u16 txq_id)
{
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = priv->scd_base_addr +
			IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);

	return 0;
}

static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
{
	/* Simply stop the queue, but don't change any configuration;
	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
	iwl_write_prph(priv,
		IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
		(0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
		(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}

void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
			int txq_id, u32 index)
{
	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
			(index & 0xff) | (txq_id << 8));
	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
}

void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
			struct iwl_tx_queue *txq,
			int tx_fifo_id, int scd_retry)
{
	int txq_id = txq->q.id;
	int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;

	iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
			(active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			(tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
			(1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
			IWLAGN_SCD_QUEUE_STTS_REG_MSK);

	txq->sched_retry = scd_retry;

	IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
			active ? "Activate" : "Deactivate",
			scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id, int tid)
static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
				int tid)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
@@ -237,108 +114,6 @@ static int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id, int sta_id,
	return iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
}
void iwlagn_txq_agg_queue_setup(struct iwl_priv *priv,
				struct ieee80211_sta *sta,
				int tid, int frame_limit)
{
	int sta_id, tx_fifo, txq_id, ssn_idx;
	u16 ra_tid;
	unsigned long flags;
	struct iwl_tid_data *tid_data;

	sta_id = iwl_sta_id(sta);
	if (WARN_ON(sta_id == IWL_INVALID_STATION))
		return;
	if (WARN_ON(tid >= MAX_TID_COUNT))
		return;

	spin_lock_irqsave(&priv->sta_lock, flags);
	tid_data = &priv->stations[sta_id].tid[tid];
	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
	txq_id = tid_data->agg.txq_id;
	tx_fifo = tid_data->agg.tx_fifo;
	spin_unlock_irqrestore(&priv->sta_lock, flags);

	ra_tid = BUILD_RAxTID(sta_id, tid);

	spin_lock_irqsave(&priv->lock, flags);

	/* Stop this Tx queue before configuring it */
	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	/* Map receiver-address / traffic-ID to this queue */
	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);

	/* Set this queue as a chain-building queue */
	iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));

	/* enable aggregations for the queue */
	iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	/* Set up Tx window size and frame limit for this queue */
	iwl_write_targ_mem(priv, priv->scd_base_addr +
			IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
			sizeof(u32),
			((frame_limit <<
			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
			IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
			((frame_limit <<
			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
			IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));

	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
				u16 ssn_idx, u8 tx_fifo)
{
	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
	    (IWLAGN_FIRST_AMPDU_QUEUE +
	    priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
		IWL_ERR(priv,
			"queue number out of range: %d, must be %d to %d\n",
			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
			IWLAGN_FIRST_AMPDU_QUEUE +
			priv->cfg->base_params->num_of_ampdu_queues - 1);
		return -EINVAL;
	}

	iwlagn_tx_queue_stop_scheduler(priv, txq_id);

	iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));

	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
	/* supposes that ssn_idx is valid (!= 0xFFF) */
	iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);

	iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
	iwl_txq_ctx_deactivate(priv, txq_id);
	iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);

	return 0;
}

/*
 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
 * must be called under priv->lock and mac access
 */
void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
	iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
}

static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				struct ieee80211_tx_info *info,
				__le16 fc, __le32 *tx_flags)
@@ -363,19 +138,15 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		tx_flags |= TX_CMD_FLG_ACK_MSK;
		if (ieee80211_is_mgmt(fc))
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		if (ieee80211_is_probe_resp(fc) &&
		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
			tx_flags |= TX_CMD_FLG_TSF_MSK;
	} else {
		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
	}

	if (ieee80211_is_back_req(fc))
	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->bt_params &&
@@ -446,6 +217,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
		if (priv->tm_fixed_rate) {
			/*
			 * rate overwrite by testmode
@@ -456,6 +228,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
				sizeof(tx_cmd->rate_n_flags));
		}
#endif
		return;
	}
@@ -547,26 +320,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = info->control.sta;
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	struct iwl_tx_cmd *tx_cmd;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_tx_cmd *tx_cmd;
	int txq_id;
	dma_addr_t phys_addr = 0;
	dma_addr_t txcmd_phys;
	dma_addr_t scratch_phys;
	u16 len, firstlen, secondlen;

	u16 seq_number = 0;
	__le16 fc;
	u8 hdr_len;
	u16 len;
	u8 sta_id;
	u8 wait_write_ptr = 0;
	u8 tid = 0;
	u8 *qc = NULL;
	unsigned long flags;
	bool is_agg = false;

@@ -614,8 +378,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);

	if (sta)
		sta_priv = (void *)sta->drv_priv;
	if (info->control.sta)
		sta_priv = (void *)info->control.sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
@@ -650,6 +414,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = NULL;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;

@@ -670,38 +435,13 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
		}
	}

	txq = &priv->txq[txq_id];
	q = &txq->q;

	if (unlikely(iwl_queue_space(q) < q->high_mark))
	tx_cmd = trans_get_tx_cmd(&priv->trans, txq_id);
	if (unlikely(!tx_cmd))
		goto drop_unlock_sta;

	/* Set up driver data for this TFD */
	memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
	txq->txb[q->write_ptr].skb = skb;
	txq->txb[q->write_ptr].ctx = ctx;

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_cmd = txq->cmd[q->write_ptr];
	out_meta = &txq->meta[q->write_ptr];
	tx_cmd = &out_cmd->cmd.tx;
	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
	memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));

	/*
	 * Set up the Tx-command (not MAC!) header.
	 * Store the chosen Tx queue and TFD index within the sequence field;
	 * after Tx, uCode's Tx response will return this value so driver can
	 * locate the frame within the tx queue and do post-tx processing.
	 */
	out_cmd->hdr.cmd = REPLY_TX;
	out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
				INDEX_TO_SEQ(q->write_ptr)));

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);
@@ -716,54 +456,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);

	iwl_update_stats(priv, true, fc, len);
	/*
	 * Use the first empty entry in this queue's command buffer array
	 * to contain the Tx command and MAC header concatenated together
	 * (payload data will be in another buffer).
	 * Size of this varies, due to varying MAC header length.
	 * If end is not dword aligned, we'll have 2 extra bytes at the end
	 * of the MAC header (device reads on dword boundaries).
	 * We'll tell device about this padding later.
	 */
	len = sizeof(struct iwl_tx_cmd) +
		sizeof(struct iwl_cmd_header) + hdr_len;
	firstlen = (len + 3) & ~3;

	/* Tell NIC about any 2-byte padding after MAC header */
	if (firstlen != len)
		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;

	/* Physical address of this Tx command's header (not MAC header!),
	 * within command buffer array. */
	txcmd_phys = dma_map_single(priv->bus.dev,
				&out_cmd->hdr, firstlen,
				DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(priv->bus.dev, txcmd_phys)))
	if (trans_tx(&priv->trans, skb, tx_cmd, txq_id, fc, is_agg, ctx))
		goto drop_unlock_sta;
	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
	dma_unmap_len_set(out_meta, len, firstlen);

	if (!ieee80211_has_morefrags(hdr->frame_control)) {
		txq->need_update = 1;
	} else {
		wait_write_ptr = 1;
		txq->need_update = 0;
	}

	/* Set up TFD's 2nd entry to point directly to remainder of skb,
	 * if any (802.11 null frames have no payload). */
	secondlen = skb->len - hdr_len;
	if (secondlen > 0) {
		phys_addr = dma_map_single(priv->bus.dev, skb->data + hdr_len,
					secondlen, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(priv->bus.dev, phys_addr))) {
			dma_unmap_single(priv->bus.dev,
					dma_unmap_addr(out_meta, mapping),
					dma_unmap_len(out_meta, len),
					DMA_BIDIRECTIONAL);
			goto drop_unlock_sta;
		}
	}

	if (ieee80211_is_data_qos(fc)) {
		priv->stations[sta_id].tid[tid].tfds_in_queue++;
@@ -772,54 +467,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	}

	spin_unlock(&priv->sta_lock);

	/* Attach buffers to TFD */
	iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1);
	if (secondlen > 0)
		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
					secondlen, 0);

	scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
			offsetof(struct iwl_tx_cmd, scratch);

	/* take back ownership of DMA buffer to enable update */
	dma_sync_single_for_cpu(priv->bus.dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);
	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
		le16_to_cpu(out_cmd->hdr.sequence));
	IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
	iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);

	/* Set up entry for this TFD in Tx byte-count array */
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		iwlagn_txq_update_byte_cnt_tbl(priv, txq,
					le16_to_cpu(tx_cmd->len));

	dma_sync_single_for_device(priv->bus.dev, txcmd_phys, firstlen,
				DMA_BIDIRECTIONAL);

	trace_iwlwifi_dev_tx(priv,
			&((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
			sizeof(struct iwl_tfd),
			&out_cmd->hdr, firstlen,
			skb->data + hdr_len, secondlen);

	/* Tell device the write index *just past* this latest filled TFD */
	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
	iwl_txq_update_write_ptr(priv, txq);
	spin_unlock_irqrestore(&priv->lock, flags);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually,
	 * regardless of the value of ret. "ret" only indicates
	 * whether or not we should update the write pointer.
	 */

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
@@ -830,17 +479,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
		if (wait_write_ptr) {
			spin_lock_irqsave(&priv->lock, flags);
			txq->need_update = 1;
			iwl_txq_update_write_ptr(priv, txq);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else {
			iwl_stop_queue(priv, txq);
		}
	}

	return 0;

drop_unlock_sta:
@@ -997,7 +635,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
	 * to deactivate the uCode queue, just return "success" to allow
	 * mac80211 to clean up it own data.
	 */
	iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
	trans_txq_agg_disable(&priv->trans, txq_id, ssn, tx_fifo_id);
	spin_unlock_irqrestore(&priv->lock, flags);

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -1026,7 +664,8 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
		u16 ssn = SEQ_TO_SN(tid_data->seq_number);
		int tx_fifo = get_fifo_from_tid(ctx, tid);
		IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
		iwlagn_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
		trans_txq_agg_disable(&priv->trans, txq_id,
				ssn, tx_fifo);
		tid_data->agg.state = IWL_AGG_OFF;
		ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
	}
@@ -41,38 +41,6 @@
#include "iwl-agn-calib.h"
#include "iwl-trans.h"

#define IWL_AC_UNSET -1

struct queue_to_fifo_ac {
	s8 fifo, ac;
};

static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
	{ IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
};

static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
	{ IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
	{ IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
	{ IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
	{ IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
	{ IWL_TX_FIFO_BE_IPAN, 2, },
	{ IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
};

static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
	{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
	 0, COEX_UNASSOC_IDLE_FLAGS},
@@ -199,12 +167,12 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
	memset(&cmd, 0, sizeof(cmd));
	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
	cmd.radio_sensor_offset = le16_to_cpu(offset_calib[1]);
	memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(offset_calib));
	if (!(cmd.radio_sensor_offset))
		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;

	IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
			cmd.radio_sensor_offset);
			le16_to_cpu(cmd.radio_sensor_offset));
	return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
			(u8 *)&cmd, sizeof(cmd));
}
@@ -222,9 +190,10 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
	calib_cfg_cmd.ucd_calib_cfg.flags =
		IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;

	return trans_send_cmd(priv, &cmd);
	return trans_send_cmd(&priv->trans, &cmd);
}

void iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -322,7 +291,7 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
		/* coexistence is disabled */
		memset(&coex_cmd, 0, sizeof(coex_cmd));
	}
	return trans_send_cmd_pdu(priv,
	return trans_send_cmd_pdu(&priv->trans,
				COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
				sizeof(coex_cmd), &coex_cmd);
}
@@ -355,7 +324,7 @@ void iwlagn_send_prio_tbl(struct iwl_priv *priv)
	memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
		sizeof(iwlagn_bt_prio_tbl));
	if (trans_send_cmd_pdu(priv,
	if (trans_send_cmd_pdu(&priv->trans,
				REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
		IWL_ERR(priv, "failed to send BT prio tbl command\n");
@@ -368,7 +337,7 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
	env_cmd.action = action;
	env_cmd.type = type;
	ret = trans_send_cmd_pdu(priv,
	ret = trans_send_cmd_pdu(&priv->trans,
				REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
				sizeof(env_cmd), &env_cmd);
	if (ret)
@@ -379,111 +348,9 @@ int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
static int iwlagn_alive_notify(struct iwl_priv *priv)
{
	const struct queue_to_fifo_ac *queue_to_fifo;
	struct iwl_rxon_context *ctx;
	u32 a;
	unsigned long flags;
	int i, chan;
	u32 reg_val;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
	a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_MEM_LOWER_BOUND;
	/* reset conext data memory */
	for (; a < priv->scd_base_addr + IWLAGN_SCD_CONTEXT_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	/* reset tx status memory */
	for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_MEM_UPPER_BOUND;
		a += 4)
		iwl_write_targ_mem(priv, a, 0);
	for (; a < priv->scd_base_addr +
		IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
		iwl_write_targ_mem(priv, a, 0);

	iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
		priv->scd_bc_tbls.dma >> 10);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
		iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
			reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
		IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv));
	iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);

	/* initiate the queues */
	for (i = 0; i < priv->hw_params.max_txq_num; i++) {
		iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
		iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
		iwl_write_targ_mem(priv, priv->scd_base_addr +
				IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
				sizeof(u32),
				((SCD_WIN_SIZE <<
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
				((SCD_FRAME_LIMIT <<
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
				IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
	}

	iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
			IWL_MASK(0, priv->hw_params.max_txq_num));

	/* Activate all Tx DMA/FIFO channels */
	iwlagn_txq_set_sched(priv, IWL_MASK(0, 7));

	/* map queues to FIFOs */
	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
		queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
	else
		queue_to_fifo = iwlagn_default_queue_to_tx_fifo;

	iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);

	/* make sure all queue are not stopped */
	memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
	for (i = 0; i < 4; i++)
		atomic_set(&priv->queue_stop_count[i], 0);
	for_each_context(priv, ctx)
		ctx->last_tx_rejected = false;

	/* reset to 0 to enable all the queue first */
	priv->txq_ctx_active_msk = 0;

	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
	BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);

	for (i = 0; i < 10; i++) {
		int fifo = queue_to_fifo[i].fifo;
		int ac = queue_to_fifo[i].ac;

		iwl_txq_ctx_activate(priv, i);

		if (fifo == IWL_TX_FIFO_UNUSED)
			continue;

		if (ac != IWL_AC_UNSET)
			iwl_set_swq_id(&priv->txq[i], ac, i);
		iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Enable L1-Active */
	iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
			APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
	trans_tx_start(&priv->trans);

	ret = iwlagn_send_wimax_coex(priv);
	if (ret)
@@ -611,7 +478,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
	int ret;
	enum iwlagn_ucode_type old_type;

	ret = iwlagn_start_device(priv);
	ret = trans_start_device(&priv->trans);
	if (ret)
		return ret;

@@ -628,8 +495,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
		return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(priv, CSR_RESET, 0);
	trans_kick_nic(&priv->trans);

	/*
	 * Some things may run in the background now, but we
@@ -647,6 +513,12 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
		return -EIO;
	}

	/*
	 * This step takes a long time (60-80ms!!) and
	 * WoWLAN image should be loaded quickly, so
	 * skip it for WoWLAN.
	 */
	if (ucode_type != IWL_UCODE_WOWLAN) {
		ret = iwl_verify_ucode(priv, image);
		if (ret) {
			priv->ucode_type = old_type;
@@ -655,6 +527,7 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
		/* delay a bit to give rfkill time to run */
		msleep(5);
	}

	ret = iwlagn_alive_notify(priv);
	if (ret) {
@@ -707,6 +580,6 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
	iwlagn_remove_notification(priv, &calib_wait);
 out:
	/* Whatever happened, stop the device */
	iwlagn_stop_device(priv);
	trans_stop_device(&priv->trans);
	return ret;
}