mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (43 commits)
  via-velocity: Fix velocity driver unmapping incorrect size.
  mlx4_en: Remove redundant refill code on RX
  mlx4_en: Removed redundant check on lso header size
  mlx4_en: Cancel port_up check in transmit function
  mlx4_en: using stop/start_all_queues
  mlx4_en: Removed redundant skb->len check
  mlx4_en: Counting all the dropped packets on the TX side
  usbnet cdc_subset: fix issues talking to PXA gadgets
  Net: qla3xxx, remove sleeping in atomic
  ipv4: fix NULL pointer + success return in route lookup path
  isdn: clean up documentation index
  cfg80211: validate station settings
  cfg80211: allow setting station parameters in mesh
  cfg80211: allow adding/deleting stations on mesh
  ath5k: fix beacon_int handling
  MAINTAINERS: Fix Atheros pattern paths
  ath9k: restore PS mode, before we put the chip into FULL SLEEP state.
  ath9k: wait for beacon frame along with CAB
  acer-wmi: fix rfkill conversion
  ath5k: avoid PCI FATAL interrupts by restoring RETRY_TIMEOUT disabling
  ...
commit 5165aece0e
@@ -14,25 +14,14 @@ README
  - general info on what you need and what to do for Linux ISDN.
README.FAQ
  - general info for FAQ.
README.audio
  - info for running audio over ISDN.
README.fax
  - info for using Fax over ISDN.
README.gigaset
  - info on the drivers for Siemens Gigaset ISDN adapters.
README.icn
  - info on the ICN-ISDN-card and its driver.
>>>>>>> 93af7aca44f0e82e67bda10a0fb73d383edcc8bd:Documentation/isdn/00-INDEX
README.HiSax
  - info on the HiSax driver which replaces the old teles.
README.act2000
  - info on driver for IBM ACT-2000 card.
README.audio
  - info for running audio over ISDN.
README.avmb1
  - info on driver for AVM-B1 ISDN card.
README.act2000
  - info on driver for IBM ACT-2000 card.
README.eicon
  - info on driver for Eicon active cards.
README.concap
  - info on "CONCAP" encapsulation protocol interface used for X.25.
README.diversion

@@ -59,7 +48,3 @@ README.x25
  - info for running X.25 over ISDN.
syncPPP.FAQ
  - frequently asked questions about running PPP over ISDN.
README.hysdn
  - info on driver for Hypercope active HYSDN cards
README.mISDN
  - info on the Modular ISDN subsystem (mISDN).
@@ -111,6 +111,8 @@ following attributes:

        name: Name assigned by driver to this key (interface or driver name).
        type: Driver type string ("wlan", "bluetooth", etc).
        persistent: Whether the soft blocked state is initialised from
                    non-volatile storage at startup.
        state: Current state of the transmitter
                0: RFKILL_STATE_SOFT_BLOCKED
                        transmitter is turned off by software
@@ -2853,7 +2853,7 @@ P: Sergey Lapin
M: slapin@ossfans.org
L: linux-zigbee-devel@lists.sourceforge.net
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lumag/lowpan.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
F: net/ieee802154/
F: drivers/ieee802154/
@@ -65,7 +65,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define TX_CQ_LEN 1024
#define RX_Q_LEN 1024 /* Does not support any other value */
#define RX_CQ_LEN 1024
#define MCC_Q_LEN 64 /* total size not to exceed 8 pages */
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256

#define BE_NAPI_WEIGHT 64

@@ -91,6 +91,61 @@ struct be_queue_info {
        atomic_t used;  /* Number of valid elements in the queue */
};

static inline u32 MODULO(u16 val, u16 limit)
{
        BUG_ON(limit & (limit - 1));
        return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
        *index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
        *index = MODULO((*index + 1), limit);
}

static inline void *queue_head_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
        index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
        index_inc(&q->tail, q->len);
}
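These helpers are the heart of the new ring handling: every BE queue length is a power of two, so MODULO() can reduce an index with a single AND instead of a division, and the BUG_ON trips on any ring that breaks that assumption. A minimal user-space sketch of the same indexing trick (illustrative names, not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Power-of-two ring indexing, as in MODULO()/index_inc() above. */
static uint16_t ring_modulo(uint16_t val, uint16_t limit)
{
        assert((limit & (limit - 1)) == 0); /* power of two only */
        return val & (limit - 1);           /* equivalent to val % limit */
}

int main(void)
{
        uint16_t head = 6, len = 8;
        for (int i = 0; i < 4; i++)
                head = ring_modulo(head + 1, len); /* 7, 0, 1, 2: wraps */
        printf("head = %u\n", head);               /* prints 2 */
        return 0;
}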

struct be_eq_obj {
        struct be_queue_info q;
        char desc[32];

        /* Adaptive interrupt coalescing (AIC) info */
        bool enable_aic;
        u16 min_eqd;            /* in usecs */
        u16 max_eqd;            /* in usecs */
        u16 cur_eqd;            /* in usecs */

        struct napi_struct napi;
};

struct be_mcc_obj {
        struct be_queue_info q;
        struct be_queue_info cq;
};

struct be_ctrl_info {
        u8 __iomem *csr;
        u8 __iomem *db;         /* Door Bell */

@@ -98,11 +153,20 @@ struct be_ctrl_info {
        int pci_func;

        /* Mbox used for cmd request/response */
        spinlock_t cmd_lock;    /* For serializing cmds to BE card */
        spinlock_t mbox_lock;   /* For serializing mbox cmds to BE card */
        struct be_dma_mem mbox_mem;
        /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
         * is stored for freeing purpose */
        struct be_dma_mem mbox_mem_alloced;

        /* MCC Rings */
        struct be_mcc_obj mcc_obj;
        spinlock_t mcc_lock;    /* For serializing mcc cmds to BE card */
        spinlock_t mcc_cq_lock;

        /* MCC Async callback */
        void (*async_cb)(void *adapter, bool link_up);
        void *adapter_ctxt;
};

#include "be_cmds.h"

@@ -150,19 +214,6 @@ struct be_stats_obj {
        struct be_dma_mem cmd;
};

struct be_eq_obj {
        struct be_queue_info q;
        char desc[32];

        /* Adaptive interrupt coalescing (AIC) info */
        bool enable_aic;
        u16 min_eqd;            /* in usecs */
        u16 max_eqd;            /* in usecs */
        u16 cur_eqd;            /* in usecs */

        struct napi_struct napi;
};

struct be_tx_obj {
        struct be_queue_info q;
        struct be_queue_info cq;

@@ -225,8 +276,9 @@ struct be_adapter {
        u32 if_handle;          /* Used to configure filtering */
        u32 pmac_id;            /* MAC addr handle used by BE card */

        struct be_link_info link;
        bool link_up;
        u32 port_num;
        bool promiscuous;
};

extern struct ethtool_ops be_ethtool_ops;

@@ -235,22 +287,6 @@ extern struct ethtool_ops be_ethtool_ops;

#define BE_SET_NETDEV_OPS(netdev, ops)  (netdev->netdev_ops = ops)

static inline u32 MODULO(u16 val, u16 limit)
{
        BUG_ON(limit & (limit - 1));
        return val & (limit - 1);
}

static inline void index_adv(u16 *index, u16 val, u16 limit)
{
        *index = MODULO((*index + val), limit);
}

static inline void index_inc(u16 *index, u16 limit)
{
        *index = MODULO((*index + 1), limit);
}

#define PAGE_SHIFT_4K           12
#define PAGE_SIZE_4K            (1 << PAGE_SHIFT_4K)

@@ -339,4 +375,6 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
        return val;
}

extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm,
                u16 num_popped);
#endif /* BE_H */
@@ -17,6 +17,133 @@

#include "be.h"

static void be_mcc_notify(struct be_ctrl_info *ctrl)
{
        struct be_queue_info *mccq = &ctrl->mcc_obj.q;
        u32 val = 0;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
        iowrite32(val, ctrl->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_cq_entry *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else {
                return false;
        }
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_cq_entry *compl)
{
        compl->flags = 0;
}

static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
        struct be_mcc_cq_entry *compl)
{
        u16 compl_status, extd_status;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                        CQE_STATUS_COMPL_MASK;
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                CQE_STATUS_EXTD_MASK;
                printk(KERN_WARNING DRV_NAME
                        " error in cmd completion: status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
                return -1;
        }
        return 0;
}
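be_mcc_compl_process() pulls two fields out of a single 32-bit status word with the usual shift-and-mask idiom. A standalone sketch of that unpacking; the shift and mask values below are stand-ins chosen for illustration, not the driver's real CQE layout:

#include <stdint.h>
#include <stdio.h>

#define DEMO_COMPL_SHIFT 0      /* assumed: low half = base status */
#define DEMO_COMPL_MASK  0xFFFF
#define DEMO_EXTD_SHIFT  16     /* assumed: high half = extended status */
#define DEMO_EXTD_MASK   0xFFFF

int main(void)
{
        uint32_t status = (7u << DEMO_EXTD_SHIFT) | 2u; /* extd=7, compl=2 */
        uint16_t compl_status = (status >> DEMO_COMPL_SHIFT) & DEMO_COMPL_MASK;
        uint16_t extd_status = (status >> DEMO_EXTD_SHIFT) & DEMO_EXTD_MASK;
        printf("compl=%u extd=%u\n", compl_status, extd_status); /* 2 and 7 */
        return 0;
}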

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_ctrl_info *ctrl,
        struct be_async_event_link_state *evt)
{
        ctrl->async_cb(ctrl->adapter_ctxt,
                evt->port_link_status == ASYNC_EVENT_LINK_UP ? true : false);
}

static inline bool is_link_state_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
                ASYNC_EVENT_CODE_LINK_STATE);
}

static struct be_mcc_cq_entry *be_mcc_compl_get(struct be_ctrl_info *ctrl)
{
        struct be_queue_info *mcc_cq = &ctrl->mcc_obj.cq;
        struct be_mcc_cq_entry *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}

void be_process_mcc(struct be_ctrl_info *ctrl)
{
        struct be_mcc_cq_entry *compl;
        int num = 0;

        spin_lock_bh(&ctrl->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(ctrl))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        BUG_ON(!is_link_state_evt(compl->flags));

                        /* Interpret compl as a async link evt */
                        be_async_link_state_process(ctrl,
                                (struct be_async_event_link_state *) compl);
                } else {
                        be_mcc_compl_process(ctrl, compl);
                        atomic_dec(&ctrl->mcc_obj.q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }
        if (num)
                be_cq_notify(ctrl, ctrl->mcc_obj.cq.id, true, num);
        spin_unlock_bh(&ctrl->mcc_cq_lock);
}

/* Wait till no more pending mcc requests are present */
static void be_mcc_wait_compl(struct be_ctrl_info *ctrl)
{
#define mcc_timeout     50000 /* 5s timeout */
        int i;
        for (i = 0; i < mcc_timeout; i++) {
                be_process_mcc(ctrl);
                if (atomic_read(&ctrl->mcc_obj.q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout)
                printk(KERN_WARNING DRV_NAME "mcc poll timed out\n");
}

/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_ctrl_info *ctrl)
{
        be_mcc_notify(ctrl);
        be_mcc_wait_compl(ctrl);
}
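be_mcc_notify_wait() is a plain post-then-poll: ring the MCC doorbell, then repeatedly reap completions until the outstanding count drops to zero or the bounded delay loop (50000 polls x 100us, about 5s) expires. The shape of that pattern, reduced to a runnable user-space simulation with stubbed hardware:

#include <stdio.h>

static int pending;                           /* simulated outstanding cmds */
static void ring_doorbell(void) { pending = 3; }
static void reap_completions(void) { if (pending) pending--; }

static int post_and_wait(int max_polls)
{
        ring_doorbell();                      /* like be_mcc_notify() */
        for (int i = 0; i < max_polls; i++) {
                reap_completions();           /* like be_process_mcc() */
                if (pending == 0)
                        return 0;             /* all requests completed */
                /* a real driver udelay()s here between polls */
        }
        return -1;                            /* timed out: warn, as above */
}

int main(void)
{
        printf("status: %d\n", post_and_wait(50000)); /* prints 0 */
        return 0;
}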

static int be_mbox_db_ready_wait(void __iomem *db)
{
        int cnt = 0, wait = 5;

@@ -44,11 +171,11 @@ static int be_mbox_db_ready_wait(void __iomem *db)

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
{
        int status;
        u16 compl_status, extd_status;
        u32 val = 0;
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;

@@ -79,24 +206,17 @@ static int be_mbox_db_ring(struct be_ctrl_info *ctrl)
        if (status != 0)
                return status;

        /* compl entry has been made now */
        be_dws_le_to_cpu(cqe, sizeof(*cqe));
        if (!(cqe->flags & CQE_FLAGS_VALID_MASK)) {
                printk(KERN_WARNING DRV_NAME ": ERROR invalid mbox compl\n");
        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(cqe)) {
                status = be_mcc_compl_process(ctrl, &mbox->cqe);
                be_mcc_compl_use(cqe);
                if (status)
                        return status;
        } else {
                printk(KERN_WARNING DRV_NAME "invalid mailbox completion\n");
                return -1;
        }

        compl_status = (cqe->status >> CQE_STATUS_COMPL_SHIFT) &
                        CQE_STATUS_COMPL_MASK;
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (cqe->status >> CQE_STATUS_EXTD_SHIFT) &
                                CQE_STATUS_EXTD_MASK;
                printk(KERN_WARNING DRV_NAME
                        ": ERROR in cmd compl. status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
        }

        return compl_status;
        return 0;
}

static int be_POST_stage_get(struct be_ctrl_info *ctrl, u16 *stage)

@@ -235,6 +355,18 @@ static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
{
        struct be_mcc_wrb *wrb = NULL;
        if (atomic_read(&mccq->used) < mccq->len) {
                wrb = queue_head_node(mccq);
                queue_head_inc(mccq);
                atomic_inc(&mccq->used);
                memset(wrb, 0, sizeof(*wrb));
        }
        return wrb;
}

int be_cmd_eq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *eq, int eq_delay)
{

@@ -244,7 +376,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -272,7 +404,7 @@ int be_cmd_eq_create(struct be_ctrl_info *ctrl,
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }
        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -284,7 +416,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
        struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -304,7 +436,7 @@ int be_cmd_mac_addr_query(struct be_ctrl_info *ctrl, u8 *mac_addr,
        if (!status)
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -315,7 +447,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
        struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -332,7 +464,7 @@ int be_cmd_pmac_add(struct be_ctrl_info *ctrl, u8 *mac_addr,
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -342,7 +474,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
        struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -354,7 +486,7 @@ int be_cmd_pmac_del(struct be_ctrl_info *ctrl, u32 if_id, u32 pmac_id)
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mbox_db_ring(ctrl);
        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

@@ -370,7 +502,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
        void *ctxt = &req->context;
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -388,7 +520,7 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 0);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt, ctrl->pci_func);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

@@ -399,7 +531,56 @@ int be_cmd_cq_create(struct be_ctrl_info *ctrl,
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }
        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len);   /* log2(len) + 1 */
        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}
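be_encoded_q_len() leans on fls(), which returns the 1-based position of the highest set bit, so for a power-of-two length fls(len) equals log2(len) + 1; the one special case is that an encoding of 16 is represented as 0. A worked user-space check with a portable fls() stand-in:

#include <stdint.h>
#include <stdio.h>

static uint32_t fls_u32(uint32_t x) /* 1-based index of highest set bit */
{
        uint32_t r = 0;
        while (x) { r++; x >>= 1; }
        return r;
}

static uint32_t encoded_q_len(int q_len)
{
        uint32_t len_encoded = fls_u32(q_len); /* log2(len) + 1 */
        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}

int main(void)
{
        /* 64 -> 7, 256 -> 9, 32768 -> 16 which wraps to 0 */
        printf("%u %u %u\n", encoded_q_len(64), encoded_q_len(256),
               encoded_q_len(32768));
        return 0;
}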

int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *mccq,
                struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt = &req->context;
        int status;

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, ctrl->pci_func);
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

@@ -415,7 +596,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
        int status;
        u32 len_encoded;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -446,7 +627,7 @@ int be_cmd_txq_create(struct be_ctrl_info *ctrl,
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }
        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

@@ -460,7 +641,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -482,7 +663,7 @@ int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
        }
        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

@@ -496,7 +677,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
        u8 subsys = 0, opcode = 0;
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -518,6 +699,10 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                printk(KERN_WARNING DRV_NAME ":bad Q type in Q destroy cmd\n");
                status = -1;

@@ -528,7 +713,7 @@ int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,

        status = be_mbox_db_ring(ctrl);
err:
        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

@@ -541,7 +726,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
        struct be_cmd_req_if_create *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -562,7 +747,7 @@ int be_cmd_if_create(struct be_ctrl_info *ctrl, u32 flags, u8 *mac,
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -572,7 +757,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
        struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -583,7 +768,7 @@ int be_cmd_if_destroy(struct be_ctrl_info *ctrl, u32 interface_id)
        req->interface_id = cpu_to_le32(interface_id);
        status = be_mbox_db_ring(ctrl);

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);

        return status;
}

@@ -598,7 +783,7 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
        struct be_sge *sge = nonembedded_sgl(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        memset(req, 0, sizeof(*req));

@@ -617,18 +802,20 @@ int be_cmd_get_stats(struct be_ctrl_info *ctrl, struct be_dma_mem *nonemb_cmd)
                be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
        }

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
                struct be_link_info *link)
                bool *link_up)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_link_status *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);

        *link_up = false;
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -639,14 +826,11 @@ int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
        status = be_mbox_db_ring(ctrl);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                link->speed = resp->mac_speed;
                link->duplex = resp->mac_duplex;
                link->fault = resp->mac_fault;
        } else {
                link->speed = PHY_LINK_SPEED_ZERO;
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
                        *link_up = true;
        }

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -656,7 +840,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
        struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -670,7 +854,7 @@ int be_cmd_get_fw_ver(struct be_ctrl_info *ctrl, char *fw_ver)
                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
        }

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -681,7 +865,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)
        struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -696,7 +880,7 @@ int be_cmd_modify_eqd(struct be_ctrl_info *ctrl, u32 eq_id, u32 eqd)

        status = be_mbox_db_ring(ctrl);

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -707,7 +891,7 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,
        struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -726,18 +910,22 @@ int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id, u16 *vtag_array,

        status = be_mbox_db_ring(ctrl);

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_promiscuous_config *req = embedded_payload(wrb);
        int status;
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_promiscuous_config *req;

        spin_lock(&ctrl->cmd_lock);
        memset(wrb, 0, sizeof(*wrb));
        spin_lock_bh(&ctrl->mcc_lock);

        wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
        BUG_ON(!wrb);

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -749,21 +937,29 @@ int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl, u8 port_num, bool en)
        else
                req->port0_promiscuous = en;

        status = be_mbox_db_ring(ctrl);
        be_mcc_notify_wait(ctrl);

        spin_unlock(&ctrl->cmd_lock);
        return status;
        spin_unlock_bh(&ctrl->mcc_lock);
        return 0;
}

int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
                u32 num, bool promiscuous)
/*
 * Use MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscous
 */
int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
                struct dev_mc_list *mc_list, u32 mc_count)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_mcast_mac_config *req = embedded_payload(wrb);
        int status;
#define BE_MAX_MC       32 /* set mcast promisc if > 32 */
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req;

        spin_lock(&ctrl->cmd_lock);
        memset(wrb, 0, sizeof(*wrb));
        spin_lock_bh(&ctrl->mcc_lock);

        wrb = wrb_from_mcc(&ctrl->mcc_obj.q);
        BUG_ON(!wrb);

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

@@ -771,17 +967,23 @@ int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id, u8 *mac_table,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        if (!promiscuous) {
                req->num_mac = cpu_to_le16(num);
                if (num)
                        memcpy(req->mac, mac_table, ETH_ALEN * num);
        if (mc_list && mc_count <= BE_MAX_MC) {
                int i;
                struct dev_mc_list *mc;

                req->num_mac = cpu_to_le16(mc_count);

                for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
                        memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }

        status = be_mbox_db_ring(ctrl);
        be_mcc_notify_wait(ctrl);

        spin_unlock(&ctrl->cmd_lock);
        return status;
        spin_unlock_bh(&ctrl->mcc_lock);

        return 0;
}

int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)

@@ -790,7 +992,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)
        struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

@@ -804,7 +1006,7 @@ int be_cmd_set_flow_control(struct be_ctrl_info *ctrl, u32 tx_fc, u32 rx_fc)

        status = be_mbox_db_ring(ctrl);

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -814,7 +1016,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
        struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

@@ -831,7 +1033,7 @@ int be_cmd_get_flow_control(struct be_ctrl_info *ctrl, u32 *tx_fc, u32 *rx_fc)
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}

@@ -841,7 +1043,7 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
        struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
        int status;

        spin_lock(&ctrl->cmd_lock);
        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

@@ -856,6 +1058,6 @@ int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num)
                *port_num = le32_to_cpu(resp->phys_port);
        }

        spin_unlock(&ctrl->cmd_lock);
        spin_unlock(&ctrl->mbox_lock);
        return status;
}
@@ -76,6 +76,34 @@ struct be_mcc_cq_entry {
        u32 flags;              /* dword 3 */
};

/* When the async bit of mcc_compl is set, the last 4 bytes of
 * mcc_compl is interpreted as follows:
 */
#define ASYNC_TRAILER_EVENT_CODE_SHIFT  8 /* bits 8 - 15 */
#define ASYNC_TRAILER_EVENT_CODE_MASK   0xFF
#define ASYNC_EVENT_CODE_LINK_STATE     0x1
struct be_async_event_trailer {
        u32 code;
};

enum {
        ASYNC_EVENT_LINK_DOWN = 0x0,
        ASYNC_EVENT_LINK_UP = 0x1
};

/* When the event code of an async trailer is link-state, the mcc_compl
 * must be interpreted as follows
 */
struct be_async_event_link_state {
        u8 physical_port;
        u8 port_link_status;
        u8 port_duplex;
        u8 port_speed;
        u8 port_fault;
        u8 rsvd0[7];
        struct be_async_event_trailer trailer;
} __packed;
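The "pseudo amap" comment further down is worth unpacking: each u8 member of such a struct stands for one bit of the real hardware context, so ordinary offsetof()/sizeof arithmetic on the pseudo struct yields the bit offset and bit width that AMAP_SET_BITS() needs. A toy demonstration with a hypothetical two-field layout (not the real context):

#include <stddef.h>
#include <stdio.h>

struct amap_demo_context {          /* hypothetical: one u8 per bit */
        unsigned char ring_size[4]; /* a 4-bit field starting at bit 0 */
        unsigned char valid;        /* a 1-bit field at bit 4 */
};

int main(void)
{
        printf("ring_size: bit offset %zu, width %zu\n",
               offsetof(struct amap_demo_context, ring_size),
               sizeof(((struct amap_demo_context *)0)->ring_size));
        printf("valid: bit offset %zu, width %zu\n",
               offsetof(struct amap_demo_context, valid),
               sizeof(((struct amap_demo_context *)0)->valid));
        return 0;
}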

struct be_mcc_mailbox {
        struct be_mcc_wrb wrb;
        struct be_mcc_cq_entry cqe;

@@ -101,6 +129,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_FIRMWARE_CONFIG           42
#define OPCODE_COMMON_NTWK_INTERFACE_CREATE     50
#define OPCODE_COMMON_NTWK_INTERFACE_DESTROY    51
#define OPCODE_COMMON_MCC_DESTROY               53
#define OPCODE_COMMON_CQ_DESTROY                54
#define OPCODE_COMMON_EQ_DESTROY                55
#define OPCODE_COMMON_QUERY_FIRMWARE_CONFIG     58

@@ -269,6 +298,38 @@ struct be_cmd_resp_cq_create {
        u16 rsvd0;
} __packed;

/******************** Create MCCQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
 * as a byte: used to calculate offset/shift/mask of each field */
struct amap_mcc_context {
        u8 con_index[14];
        u8 rsvd0[2];
        u8 ring_size[4];
        u8 fetch_wrb;
        u8 fetch_r2t;
        u8 cq_id[10];
        u8 prod_index[14];
        u8 fid[8];
        u8 pdid[9];
        u8 valid;
        u8 rsvd1[32];
        u8 rsvd2[32];
} __packed;

struct be_cmd_req_mcc_create {
        struct be_cmd_req_hdr hdr;
        u16 num_pages;
        u16 rsvd0;
        u8 context[sizeof(struct amap_mcc_context) / 8];
        struct phys_addr pages[8];
} __packed;

struct be_cmd_resp_mcc_create {
        struct be_cmd_resp_hdr hdr;
        u16 id;
        u16 rsvd0;
} __packed;

/******************** Create TxQ ***************************/
#define BE_ETH_TX_RING_TYPE_STANDARD    2
#define BE_ULP1_NUM                     1

@@ -341,7 +402,8 @@ enum {
        QTYPE_EQ = 1,
        QTYPE_CQ,
        QTYPE_TXQ,
        QTYPE_RXQ
        QTYPE_RXQ,
        QTYPE_MCCQ
};

struct be_cmd_req_q_destroy {

@@ -546,12 +608,6 @@ struct be_cmd_req_link_status {
        u32 rsvd;
};

struct be_link_info {
        u8 duplex;
        u8 speed;
        u8 fault;
};

enum {
        PHY_LINK_DUPLEX_NONE = 0x0,
        PHY_LINK_DUPLEX_HALF = 0x1,

@@ -657,6 +713,9 @@ extern int be_cmd_cq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay,
                int num_cqe_dma_coalesce);
extern int be_cmd_mccq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *mccq,
                struct be_queue_info *cq);
extern int be_cmd_txq_create(struct be_ctrl_info *ctrl,
                struct be_queue_info *txq,
                struct be_queue_info *cq);

@@ -667,7 +726,7 @@ extern int be_cmd_rxq_create(struct be_ctrl_info *ctrl,
extern int be_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
                int type);
extern int be_cmd_link_status_query(struct be_ctrl_info *ctrl,
                struct be_link_info *link);
                bool *link_up);
extern int be_cmd_reset(struct be_ctrl_info *ctrl);
extern int be_cmd_get_stats(struct be_ctrl_info *ctrl,
                struct be_dma_mem *nonemb_cmd);

@@ -679,10 +738,11 @@ extern int be_cmd_vlan_config(struct be_ctrl_info *ctrl, u32 if_id,
                bool promiscuous);
extern int be_cmd_promiscuous_config(struct be_ctrl_info *ctrl,
                u8 port_num, bool en);
extern int be_cmd_mcast_mac_set(struct be_ctrl_info *ctrl, u32 if_id,
                u8 *mac_table, u32 num, bool promiscuous);
extern int be_cmd_multicast_set(struct be_ctrl_info *ctrl, u32 if_id,
                struct dev_mc_list *mc_list, u32 mc_count);
extern int be_cmd_set_flow_control(struct be_ctrl_info *ctrl,
                u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_ctrl_info *ctrl,
                u32 *tx_fc, u32 *rx_fc);
extern int be_cmd_query_fw_cfg(struct be_ctrl_info *ctrl, u32 *port_num);
extern void be_process_mcc(struct be_ctrl_info *ctrl);
@@ -61,7 +61,7 @@
/* Clear the interrupt for this eq */
#define DB_EQ_CLR_SHIFT         (9)     /* bit 9 */
/* Must be 1 */
#define DB_EQ_EVNT_SHIFT        (10)    /* bit 10 */
#define DB_EQ_EVNT_SHIFT        (10)    /* bit 10 */
/* Number of event entries processed */
#define DB_EQ_NUM_POPPED_SHIFT  (16)    /* bits 16 - 28 */
/* Rearm bit */

@@ -88,6 +88,12 @@
/* Number of rx frags posted */
#define DB_RQ_NUM_POSTED_SHIFT  (24)    /* bits 24 - 31 */

/********** MCC door bell ************/
#define DB_MCCQ_OFFSET          0x140
#define DB_MCCQ_RING_ID_MASK    0x7FF   /* bits 0 - 10 */
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16)   /* bits 16 - 29 */
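These defines are exactly what be_mcc_notify() in be_cmds.c combines into one doorbell write: ring id in bits 0-10, number of newly posted entries from bit 16. A standalone check of the composition using the masks above:

#include <stdint.h>
#include <stdio.h>

#define DB_MCCQ_RING_ID_MASK     0x7FF /* bits 0 - 10 */
#define DB_MCCQ_NUM_POSTED_SHIFT 16    /* bits 16 - 29 */

int main(void)
{
        uint32_t qid = 0x123, num_posted = 1, val = 0;
        val |= qid & DB_MCCQ_RING_ID_MASK;             /* which ring */
        val |= num_posted << DB_MCCQ_NUM_POSTED_SHIFT; /* how many entries */
        printf("doorbell word = 0x%08x\n", val);       /* 0x00010123 */
        return 0;
}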

/*
 * BE descriptors: host memory data structures whose formats
 * are hardwired in BE silicon.
@@ -60,26 +60,6 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
        return 0;
}

static inline void *queue_head_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->head * q->entry_size;
}

static inline void *queue_tail_node(struct be_queue_info *q)
{
        return q->dma_mem.va + q->tail * q->entry_size;
}

static inline void queue_head_inc(struct be_queue_info *q)
{
        index_inc(&q->head, q->len);
}

static inline void queue_tail_inc(struct be_queue_info *q)
{
        index_inc(&q->tail, q->len);
}

static void be_intr_set(struct be_ctrl_info *ctrl, bool enable)
{
        u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;

@@ -127,7 +107,7 @@ static void be_eq_notify(struct be_ctrl_info *ctrl, u16 qid,
        iowrite32(val, ctrl->db + DB_EQ_OFFSET);
}

static void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid,
                bool arm, u16 num_popped)
{
        u32 val = 0;

@@ -234,28 +214,24 @@ static void netdev_stats_update(struct be_adapter *adapter)
        dev_stats->tx_window_errors = 0;
}

static void be_link_status_update(struct be_adapter *adapter)
void be_link_status_update(void *ctxt, bool link_up)
{
        struct be_link_info *prev = &adapter->link;
        struct be_link_info now = { 0 };
        struct be_adapter *adapter = ctxt;
        struct net_device *netdev = adapter->netdev;

        be_cmd_link_status_query(&adapter->ctrl, &now);

        /* If link came up or went down */
        if (now.speed != prev->speed && (now.speed == PHY_LINK_SPEED_ZERO ||
                prev->speed == PHY_LINK_SPEED_ZERO)) {
                if (now.speed == PHY_LINK_SPEED_ZERO) {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                } else {
        if (adapter->link_up != link_up) {
                if (link_up) {
                        netif_start_queue(netdev);
                        netif_carrier_on(netdev);
                        printk(KERN_INFO "%s: Link up\n", netdev->name);
                } else {
                        netif_stop_queue(netdev);
                        netif_carrier_off(netdev);
                        printk(KERN_INFO "%s: Link down\n", netdev->name);
                }
                adapter->link_up = link_up;
        }
        *prev = now;
}

/* Update the EQ delay n BE based on the RX frags consumed / sec */

@@ -569,47 +545,32 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
        be_vid_config(netdev);
}

static void be_set_multicast_filter(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct dev_mc_list *mc_ptr;
        u8 mac_addr[32][ETH_ALEN];
        int i = 0;

        if (netdev->flags & IFF_ALLMULTI) {
                /* set BE in Multicast promiscuous */
                be_cmd_mcast_mac_set(&adapter->ctrl,
                        adapter->if_handle, NULL, 0, true);
                return;
        }

        for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
                memcpy(&mac_addr[i][0], mc_ptr->dmi_addr, ETH_ALEN);
                if (++i >= 32) {
                        be_cmd_mcast_mac_set(&adapter->ctrl,
                                adapter->if_handle, &mac_addr[0][0], i, false);
                        i = 0;
                }

        }

        if (i) {
                /* reset the promiscuous mode also. */
                be_cmd_mcast_mac_set(&adapter->ctrl,
                        adapter->if_handle, &mac_addr[0][0], i, false);
        }
}

static void be_set_multicast_list(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_ctrl_info *ctrl = &adapter->ctrl;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 1);
        } else {
                be_cmd_promiscuous_config(&adapter->ctrl, adapter->port_num, 0);
                be_set_multicast_filter(netdev);
                be_cmd_promiscuous_config(ctrl, adapter->port_num, 1);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_promiscuous_config(ctrl, adapter->port_num, 0);
        }

        if (netdev->flags & IFF_ALLMULTI) {
                be_cmd_multicast_set(ctrl, adapter->if_handle, NULL, 0);
                goto done;
        }

        be_cmd_multicast_set(ctrl, adapter->if_handle, netdev->mc_list,
                netdev->mc_count);
done:
        return;
}

static void be_rx_rate_update(struct be_adapter *adapter)

@@ -960,10 +921,8 @@ static void be_post_rx_frags(struct be_adapter *adapter)
        return;
}

static struct be_eth_tx_compl *
be_tx_compl_get(struct be_adapter *adapter)
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

        if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)

@@ -1051,6 +1010,59 @@ static void be_tx_q_clean(struct be_adapter *adapter)
        }
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_ctrl_info *ctrl = &adapter->ctrl;

        q = &ctrl->mcc_obj.q;
        if (q->created)
                be_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
        be_queue_free(adapter, q);

        q = &ctrl->mcc_obj.cq;
        if (q->created)
                be_cmd_q_destroy(ctrl, q, QTYPE_CQ);
        be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
        struct be_queue_info *q, *cq;
        struct be_ctrl_info *ctrl = &adapter->ctrl;

        /* Alloc MCC compl queue */
        cq = &ctrl->mcc_obj.cq;
        if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
                        sizeof(struct be_mcc_cq_entry)))
                goto err;

        /* Ask BE to create MCC compl queue; share TX's eq */
        if (be_cmd_cq_create(ctrl, cq, &adapter->tx_eq.q, false, true, 0))
                goto mcc_cq_free;

        /* Alloc MCC queue */
        q = &ctrl->mcc_obj.q;
        if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
                goto mcc_cq_destroy;

        /* Ask BE to create MCC queue */
        if (be_cmd_mccq_create(ctrl, q, cq))
                goto mcc_q_free;

        return 0;

mcc_q_free:
        be_queue_free(adapter, q);
mcc_cq_destroy:
        be_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
        be_queue_free(adapter, cq);
err:
        return -1;
}
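be_mcc_queues_create() states its ordering constraint in the comment ("Must be called only after TX qs are created as MCC shares TX EQ") and unwinds failures with the usual kernel goto ladder: each label undoes only the steps that already succeeded, in reverse order. The bare shape of that idiom, with stub steps standing in for the alloc/create calls:

#include <stdio.h>

static int step_a(void) { return 0; }  /* 0 = success, as in the driver */
static int step_b(void) { return -1; } /* force a failure to show unwind */
static void undo_step_a(void) { puts("undoing step a"); }

static int create_with_unwind(void)
{
        if (step_a())
                goto err;
        if (step_b())
                goto free_a; /* b failed: only a needs undoing */
        return 0;

free_a:
        undo_step_a();       /* teardown runs in reverse order */
err:
        return -1;
}

int main(void)
{
        printf("status = %d\n", create_with_unwind()); /* -1, after undo */
        return 0;
}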

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;

@@ -1263,7 +1275,7 @@ static irqreturn_t be_msix_rx(int irq, void *dev)
        return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx(int irq, void *dev)
static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
        struct be_adapter *adapter = dev;

@@ -1324,40 +1336,51 @@ int be_poll_rx(struct napi_struct *napi, int budget)
        return work_done;
}

/* For TX we don't honour budget; consume everything */
int be_poll_tx(struct napi_struct *napi, int budget)
void be_process_tx(struct be_adapter *adapter)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);
        struct be_tx_obj *tx_obj = &adapter->tx_obj;
        struct be_queue_info *tx_cq = &tx_obj->cq;
        struct be_queue_info *txq = &tx_obj->q;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
        struct be_eth_tx_compl *txcp;
        u32 num_cmpl = 0;
        u16 end_idx;

        while ((txcp = be_tx_compl_get(adapter))) {
        while ((txcp = be_tx_compl_get(tx_cq))) {
                end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
                        wrb_index, txcp);
                be_tx_compl_process(adapter, end_idx);
                num_cmpl++;
        }

        /* As Tx wrbs have been freed up, wake up netdev queue if
         * it was stopped due to lack of tx wrbs.
         */
        if (netif_queue_stopped(adapter->netdev) &&
        if (num_cmpl) {
                be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);

                /* As Tx wrbs have been freed up, wake up netdev queue if
                 * it was stopped due to lack of tx wrbs.
                 */
                if (netif_queue_stopped(adapter->netdev) &&
                        atomic_read(&txq->used) < txq->len / 2) {
                netif_wake_queue(adapter->netdev);
                        netif_wake_queue(adapter->netdev);
                }

                drvr_stats(adapter)->be_tx_events++;
                drvr_stats(adapter)->be_tx_compl += num_cmpl;
        }
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
        struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
        struct be_adapter *adapter =
                container_of(tx_eq, struct be_adapter, tx_eq);

        napi_complete(napi);

        be_cq_notify(&adapter->ctrl, tx_cq->id, true, num_cmpl);
        be_process_tx(adapter);

        drvr_stats(adapter)->be_tx_events++;
        drvr_stats(adapter)->be_tx_compl += num_cmpl;
        be_process_mcc(&adapter->ctrl);

        return 1;
}

@@ -1368,9 +1391,6 @@ static void be_worker(struct work_struct *work)
                container_of(work, struct be_adapter, work.work);
        int status;

        /* Check link */
        be_link_status_update(adapter);

        /* Get Stats */
        status = be_cmd_get_stats(&adapter->ctrl, &adapter->stats.cmd);
        if (!status)

@@ -1419,7 +1439,7 @@ static int be_msix_register(struct be_adapter *adapter)

        sprintf(tx_eq->desc, "%s-tx", netdev->name);
        vec = be_msix_vec_get(adapter, tx_eq->q.id);
        status = request_irq(vec, be_msix_tx, 0, tx_eq->desc, adapter);
        status = request_irq(vec, be_msix_tx_mcc, 0, tx_eq->desc, adapter);
        if (status)
                goto err;

@@ -1495,6 +1515,39 @@ static int be_open(struct net_device *netdev)
        struct be_ctrl_info *ctrl = &adapter->ctrl;
        struct be_eq_obj *rx_eq = &adapter->rx_eq;
        struct be_eq_obj *tx_eq = &adapter->tx_eq;
        bool link_up;
        int status;

        /* First time posting */
        be_post_rx_frags(adapter);

        napi_enable(&rx_eq->napi);
        napi_enable(&tx_eq->napi);

        be_irq_register(adapter);

        be_intr_set(ctrl, true);

        /* The evt queues are created in unarmed state; arm them */
        be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
        be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);

        /* Rx compl queue may be in unarmed state; rearm it */
        be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);

        status = be_cmd_link_status_query(ctrl, &link_up);
        if (status)
                return status;
        be_link_status_update(adapter, link_up);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

static int be_setup(struct be_adapter *adapter)
{
        struct be_ctrl_info *ctrl = &adapter->ctrl;
        struct net_device *netdev = adapter->netdev;
        u32 if_flags;
        int status;

@@ -1521,29 +1574,14 @@ static int be_open(struct net_device *netdev)
        if (status != 0)
                goto tx_qs_destroy;

        /* First time posting */
        be_post_rx_frags(adapter);
        status = be_mcc_queues_create(adapter);
        if (status != 0)
                goto rx_qs_destroy;

        napi_enable(&rx_eq->napi);
        napi_enable(&tx_eq->napi);

        be_irq_register(adapter);

        be_intr_set(ctrl, true);

        /* The evt queues are created in the unarmed state; arm them */
        be_eq_notify(ctrl, rx_eq->q.id, true, false, 0);
        be_eq_notify(ctrl, tx_eq->q.id, true, false, 0);

        /* The compl queues are created in the unarmed state; arm them */
        be_cq_notify(ctrl, adapter->rx_obj.cq.id, true, 0);
        be_cq_notify(ctrl, adapter->tx_obj.cq.id, true, 0);

        be_link_status_update(adapter);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

rx_qs_destroy:
        be_rx_queues_destroy(adapter);
tx_qs_destroy:
        be_tx_queues_destroy(adapter);
if_destroy:

@@ -1552,6 +1590,19 @@ static int be_open(struct net_device *netdev)
        return status;
}

static int be_clear(struct be_adapter *adapter)
{
        struct be_ctrl_info *ctrl = &adapter->ctrl;

        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);

        be_cmd_if_destroy(ctrl, adapter->if_handle);

        be_mcc_queues_destroy(adapter);
        return 0;
}

static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

@@ -1564,7 +1615,7 @@ static int be_close(struct net_device *netdev)

        netif_stop_queue(netdev);
        netif_carrier_off(netdev);
        adapter->link.speed = PHY_LINK_SPEED_ZERO;
        adapter->link_up = false;

        be_intr_set(ctrl, false);

@@ -1581,10 +1632,6 @@ static int be_close(struct net_device *netdev)
        napi_disable(&rx_eq->napi);
        napi_disable(&tx_eq->napi);

        be_rx_queues_destroy(adapter);
        be_tx_queues_destroy(adapter);

        be_cmd_if_destroy(ctrl, adapter->if_handle);
        return 0;
}

@@ -1673,7 +1720,7 @@ static void be_netdev_init(struct net_device *netdev)

        netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
                BE_NAPI_WEIGHT);
        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx,
        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);

        netif_carrier_off(netdev);

@@ -1755,7 +1802,12 @@ static int be_ctrl_init(struct be_adapter *adapter)
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
        spin_lock_init(&ctrl->cmd_lock);
        spin_lock_init(&ctrl->mbox_lock);
        spin_lock_init(&ctrl->mcc_lock);
        spin_lock_init(&ctrl->mcc_cq_lock);

        ctrl->async_cb = be_link_status_update;
        ctrl->adapter_ctxt = adapter;

        val = ioread32(ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
        ctrl->pci_func = (val >> MEMBAR_CTRL_INT_CTRL_PFUNC_SHIFT) &

@@ -1793,6 +1845,8 @@ static void __devexit be_remove(struct pci_dev *pdev)

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

@@ -1890,13 +1944,18 @@ static int __devinit be_probe(struct pci_dev *pdev,
        be_netdev_init(netdev);
        SET_NETDEV_DEV(netdev, &adapter->pdev->dev);

        status = be_setup(adapter);
        if (status)
                goto stats_clean;
        status = register_netdev(netdev);
        if (status != 0)
                goto stats_clean;
                goto unsetup;

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:

@@ -1921,6 +1980,7 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                be_clear(adapter);
                rtnl_unlock();
        }

@@ -1947,6 +2007,7 @@ static int be_resume(struct pci_dev *pdev)

        if (netif_running(netdev)) {
                rtnl_lock();
                be_setup(adapter);
                be_open(netdev);
                rtnl_unlock();
        }
@@ -1997,7 +1997,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
        struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
        struct e1000_hw *hw = &adapter->hw;
        struct net_device *poll_dev = adapter->netdev;
        int tx_cleaned = 0, work_done = 0;
        int tx_cleaned = 1, work_done = 0;

        adapter = netdev_priv(poll_dev);
@@ -668,7 +668,7 @@ int mlx4_en_start_port(struct net_device *dev)
        queue_work(mdev->workqueue, &priv->mcast_task);

        priv->port_up = true;
        netif_start_queue(dev);
        netif_tx_start_all_queues(dev);
        return 0;

mac_err:

@@ -700,14 +700,14 @@ void mlx4_en_stop_port(struct net_device *dev)
                en_dbg(DRV, priv, "stop port called while port already down\n");
                return;
        }
        netif_stop_queue(dev);

        /* Synchronize with tx routine */
        netif_tx_lock_bh(dev);
        priv->port_up = false;
        netif_tx_stop_all_queues(dev);
        netif_tx_unlock_bh(dev);

        /* close port*/
        priv->port_up = false;
        mlx4_CLOSE_PORT(mdev->dev, priv->port);

        /* Unregister Mac address for the port */
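The reworked stop path flips port_up and calls netif_tx_stop_all_queues() inside netif_tx_lock_bh(), which (for drivers that let the stack take the default TX locks) excludes any transmit in flight and stops new ones; that ordering is what lets the companion commit in this merge ("mlx4_en: Cancel port_up check in transmit function") drop the check from the hot path. A hedged sketch of the stop-side sequence, with a hypothetical helper name:

#include <linux/netdevice.h>

/* Illustrative only: mirrors the locking order used above. */
static void demo_stop_tx(struct net_device *dev, bool *port_up)
{
        netif_tx_lock_bh(dev);         /* excludes concurrent xmit paths */
        *port_up = false;              /* state flip seen by later xmits */
        netif_tx_stop_all_queues(dev); /* stack stops calling xmit at all */
        netif_tx_unlock_bh(dev);
}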

@@ -881,7 +881,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

        cancel_delayed_work(&priv->stats_task);
        cancel_delayed_work(&priv->refill_task);
        /* flush any pending task for this netdev */
        flush_workqueue(mdev->workqueue);

@@ -986,7 +985,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        spin_lock_init(&priv->stats_lock);
        INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
        INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
        INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
        INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
@@ -269,31 +269,6 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 	return 0;
 }
 
-static int mlx4_en_fill_rx_buf(struct net_device *dev,
-			       struct mlx4_en_rx_ring *ring)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int num = 0;
-	int err;
-
-	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
-					      ring->size_mask);
-		if (err) {
-			if (netif_msg_rx_err(priv))
-				en_warn(priv, "Failed preparing rx descriptor\n");
-			priv->port_stats.rx_alloc_failed++;
-			break;
-		}
-		++num;
-		++ring->prod;
-	}
-	if ((u32) (ring->prod - ring->cons) == ring->actual_size)
-		ring->full = 1;
-
-	return num;
-}
-
 static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {

@@ -312,42 +287,6 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 	}
 }
 
-
-void mlx4_en_rx_refill(struct work_struct *work)
-{
-	struct delayed_work *delay = to_delayed_work(work);
-	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
-						 refill_task);
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct net_device *dev = priv->dev;
-	struct mlx4_en_rx_ring *ring;
-	int need_refill = 0;
-	int i;
-
-	mutex_lock(&mdev->state_lock);
-	if (!mdev->device_up || !priv->port_up)
-		goto out;
-
-	/* We only get here if there are no receive buffers, so we can't race
-	 * with Rx interrupts while filling buffers */
-	for (i = 0; i < priv->rx_ring_num; i++) {
-		ring = &priv->rx_ring[i];
-		if (ring->need_refill) {
-			if (mlx4_en_fill_rx_buf(dev, ring)) {
-				ring->need_refill = 0;
-				mlx4_en_update_rx_prod_db(ring);
-			} else
-				need_refill = 1;
-		}
-	}
-	if (need_refill)
-		queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
-
-out:
-	mutex_unlock(&mdev->state_lock);
-}
-
 
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
 {

@@ -457,9 +396,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 			ring_ind--;
 			goto err_allocator;
 		}
-
-		/* Fill Rx buffers */
-		ring->full = 0;
 	}
 	err = mlx4_en_fill_rx_buffers(priv);
 	if (err)

@@ -647,33 +583,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 	return skb;
 }
 
-static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
-			      struct mlx4_en_rx_ring *ring,
-			      int from, int to, int num)
-{
-	struct skb_frag_struct *skb_frags_from;
-	struct skb_frag_struct *skb_frags_to;
-	struct mlx4_en_rx_desc *rx_desc_from;
-	struct mlx4_en_rx_desc *rx_desc_to;
-	int from_index, to_index;
-	int nr, i;
-
-	for (i = 0; i < num; i++) {
-		from_index = (from + i) & ring->size_mask;
-		to_index = (to + i) & ring->size_mask;
-		skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
-		skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
-		rx_desc_from = ring->buf + (from_index << ring->log_stride);
-		rx_desc_to = ring->buf + (to_index << ring->log_stride);
-
-		for (nr = 0; nr < priv->num_frags; nr++) {
-			skb_frags_to[nr].page = skb_frags_from[nr].page;
-			skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
-			rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
-		}
-	}
-}
-
-
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {

@@ -821,11 +730,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
 	ring->cons = cq->mcq.cons_index;
 	ring->prod += polled; /* Polled descriptors were realocated in place */
-	if (unlikely(!ring->full)) {
-		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
-				  ring->prod - polled, polled);
-		mlx4_en_fill_rx_buf(dev, ring);
-	}
 	mlx4_en_update_rx_prod_db(ring);
 	return polled;
 }
@@ -515,16 +515,9 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			else {
 				if (netif_msg_tx_err(priv))
 					en_warn(priv, "Non-linear headers\n");
 				dev_kfree_skb_any(skb);
 				return 0;
 			}
 		}
-		if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
-			if (netif_msg_tx_err(priv))
-				en_warn(priv, "LSO header size too big\n");
-			dev_kfree_skb_any(skb);
-			return 0;
-		}
 	} else {
 		*lso_header_size = 0;
 		if (!is_inline(skb, NULL))

@@ -616,13 +609,9 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	int lso_header_size;
 	void *fragptr;
 
-	if (unlikely(!skb->len)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
 	real_size = get_real_size(skb, dev, &lso_header_size);
 	if (unlikely(!real_size))
-		return NETDEV_TX_OK;
+		goto tx_drop;
 
 	/* Allign descriptor to TXBB size */
 	desc_size = ALIGN(real_size, TXBB_SIZE);

@@ -630,8 +619,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
 		if (netif_msg_tx_err(priv))
 			en_warn(priv, "Oversized header or SG list\n");
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
+		goto tx_drop;
 	}
 
 	tx_ind = skb->queue_mapping;

@@ -653,14 +641,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	/* Now that we know what Tx ring to use */
-	if (unlikely(!priv->port_up)) {
-		if (netif_msg_tx_err(priv))
-			en_warn(priv, "xmit: port down!\n");
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
 	/* Track current inflight packets for performance analysis */
 	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
 			 (u32) (ring->prod - ring->cons - 1));

@@ -785,5 +765,10 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		mlx4_en_xmit_poll(priv, tx_ind);
 
 	return 0;
+
+tx_drop:
+	dev_kfree_skb_any(skb);
+	priv->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
@@ -99,7 +99,6 @@
 #define RSS_FACTOR		2
 #define TXBB_SIZE		64
 #define HEADROOM		(2048 / TXBB_SIZE + 1)
-#define MAX_LSO_HDR_SIZE	92
 #define STAMP_STRIDE		64
 #define STAMP_DWORDS		(STAMP_STRIDE / 4)
 #define STAMP_SHIFT		31

@@ -296,8 +295,6 @@ struct mlx4_en_rx_ring {
 	u32 prod;
 	u32 cons;
 	u32 buf_size;
-	int need_refill;
-	int full;
 	void *buf;
 	void *rx_info;
 	unsigned long bytes;

@@ -495,7 +492,6 @@ struct mlx4_en_priv {
 	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
 	struct work_struct mcast_task;
 	struct work_struct mac_task;
-	struct delayed_work refill_task;
 	struct work_struct watchdog_task;
 	struct work_struct linkstate_task;
 	struct delayed_work stats_task;

@@ -565,7 +561,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
-void mlx4_en_rx_refill(struct work_struct *work);
 void mlx4_en_rx_irq(struct mlx4_cq *mcq);
 
 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
@@ -1750,12 +1750,12 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
 	uc_addr_set(mp, dev->dev_addr);
 
-	port_config = rdlp(mp, PORT_CONFIG);
+	port_config = rdlp(mp, PORT_CONFIG) & ~UNICAST_PROMISCUOUS_MODE;
+
 	nibbles = uc_addr_filter_mask(dev);
 	if (!nibbles) {
 		port_config |= UNICAST_PROMISCUOUS_MODE;
-		wrlp(mp, PORT_CONFIG, port_config);
-		return;
+		nibbles = 0xffff;
 	}
 
 	for (i = 0; i < 16; i += 4) {

@@ -1776,7 +1776,6 @@ static void mv643xx_eth_program_unicast_filter(struct net_device *dev)
 		wrl(mp, off, v);
 	}
 
-	port_config &= ~UNICAST_PROMISCUOUS_MODE;
 	wrlp(mp, PORT_CONFIG, port_config);
 }
@@ -3837,7 +3837,9 @@ static void ql_reset_work(struct work_struct *work)
 				      16) | ISP_CONTROL_RI));
 		}
 
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		ssleep(1);
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 	} while (--max_wait_time);
 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
@@ -51,9 +51,6 @@
 #define TX_BUFFS_AVAIL(tp) \
 	(tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
 
-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-static const int max_interrupt_work = 20;
-
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static const int multicast_filter_limit = 32;
@@ -169,10 +169,12 @@ config USB_NET_CDCETHER
 	  The Linux-USB CDC Ethernet Gadget driver is an open implementation.
 	  This driver should work with at least the following devices:
 
+	    * Dell Wireless 5530 HSPA
 	    * Ericsson PipeRider (all variants)
+	    * Ericsson Mobile Broadband Module (all variants)
 	    * Motorola (DM100 and SB4100)
 	    * Broadcom Cable Modem (reference design)
-	    * Toshiba PCX1100U
+	    * Toshiba (PCX1100U and F3507g)
 	    * ...
 
 	  This driver creates an interface named "ethX", where X depends on
@@ -533,6 +533,31 @@ static const struct usb_device_id products [] = {
 	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
 			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
 }, {
+	/* Ericsson F3507g ver. 2 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &cdc_info,
+}, {
+	/* Ericsson F3607gw */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &cdc_info,
+}, {
+	/* Ericsson F3307 */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &cdc_info,
+}, {
+	/* Toshiba F3507g */
+	USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &cdc_info,
+}, {
+	/* Dell F3507g */
+	USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long) &cdc_info,
+},
 	{ },		// END
 };
@@ -307,9 +307,10 @@ static const struct usb_device_id products [] = {
 	USB_DEVICE (0x1286, 0x8001),	// "blob" bootloader
 	.driver_info = (unsigned long) &blob_info,
 }, {
-	// Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config
-	// e.g. Gumstix, current OpenZaurus, ...
-	USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
+	// Linux Ethernet/RNDIS gadget, mostly on PXA, second config
+	// e.g. Gumstix, current OpenZaurus, ... or anything else
+	// that just enables this gadget option.
+	USB_DEVICE (0x0525, 0xa4a2),
 	.driver_info = (unsigned long) &linuxdev_info,
 },
 #endif
@@ -297,7 +297,7 @@ static int update_eth_regs_async(pegasus_t * pegasus)
 
 	pegasus->dr.bRequestType = PEGASUS_REQT_WRITE;
 	pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS;
-	pegasus->dr.wValue = 0;
+	pegasus->dr.wValue = cpu_to_le16(0);
 	pegasus->dr.wIndex = cpu_to_le16(EthCtrl0);
 	pegasus->dr.wLength = cpu_to_le16(3);
 	pegasus->ctrl_urb->transfer_buffer_length = 3;

@@ -446,11 +446,12 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
 	int i;
 	__u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE };
 	int ret;
+	__le16 le_data = cpu_to_le16(data);
 
 	set_registers(pegasus, EpromOffset, 4, d);
 	enable_eprom_write(pegasus);
 	set_register(pegasus, EpromOffset, index);
-	set_registers(pegasus, EpromData, 2, &data);
+	set_registers(pegasus, EpromData, 2, &le_data);
 	set_register(pegasus, EpromCtrl, EPROM_WRITE);
 
 	for (i = 0; i < REG_TIMEOUT; i++) {

@@ -923,29 +924,32 @@ static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev)
 
 static inline void disable_net_traffic(pegasus_t * pegasus)
 {
-	int tmp = 0;
+	__le16 tmp = cpu_to_le16(0);
 
-	set_registers(pegasus, EthCtrl0, 2, &tmp);
+	set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp);
 }
 
 static inline void get_interrupt_interval(pegasus_t * pegasus)
 {
-	__u8 data[2];
+	u16 data;
+	u8 interval;
 
-	read_eprom_word(pegasus, 4, (__u16 *) data);
+	read_eprom_word(pegasus, 4, &data);
+	interval = data >> 8;
 	if (pegasus->usb->speed != USB_SPEED_HIGH) {
-		if (data[1] < 0x80) {
+		if (interval < 0x80) {
 			if (netif_msg_timer(pegasus))
 				dev_info(&pegasus->intf->dev, "intr interval "
 					"changed from %ums to %ums\n",
-					data[1], 0x80);
-			data[1] = 0x80;
+					interval, 0x80);
+			interval = 0x80;
+			data = (data & 0x00FF) | ((u16)interval << 8);
 #ifdef PEGASUS_WRITE_EEPROM
-			write_eprom_word(pegasus, 4, *(__u16 *) data);
+			write_eprom_word(pegasus, 4, data);
 #endif
 		}
 	}
-	pegasus->intr_interval = data[1];
+	pegasus->intr_interval = interval;
 }
 
 static void set_carrier(struct net_device *net)

@@ -1299,7 +1303,8 @@ static int pegasus_blacklisted(struct usb_device *udev)
 	/* Special quirk to keep the driver from handling the Belkin Bluetooth
 	 * dongle which happens to have the same ID.
 	 */
-	if ((udd->idVendor == VENDOR_BELKIN && udd->idProduct == 0x0121) &&
+	if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) &&
+	    (udd->idProduct == cpu_to_le16(0x0121)) &&
 	    (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) &&
 	    (udd->bDeviceProtocol == 1))
 		return 1;
@@ -1847,7 +1847,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
 	 */
 	if (tdinfo->skb_dma) {
 
-		pktlen = (skb->len > ETH_ZLEN ? : ETH_ZLEN);
+		pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
 		for (i = 0; i < tdinfo->nskb_dma; i++) {
 #ifdef VELOCITY_ZERO_COPY_SUPPORT
 			pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
@@ -538,6 +538,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
 	sc->iobase = mem; /* So we can unmap it on detach */
 	sc->cachelsz = csz * sizeof(u32); /* convert to bytes */
 	sc->opmode = NL80211_IFTYPE_STATION;
+	sc->bintval = 1000;
 	mutex_init(&sc->lock);
 	spin_lock_init(&sc->rxbuflock);
 	spin_lock_init(&sc->txbuflock);

@@ -686,6 +687,13 @@ ath5k_pci_resume(struct pci_dev *pdev)
 	if (err)
 		return err;
 
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state
+	 */
+	pci_write_config_byte(pdev, 0x41, 0);
+
 	err = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
 	if (err) {
 		ATH5K_ERR(sc, "request_irq failed\n");

@@ -2748,9 +2756,6 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
 		goto end;
 	}
 
-	/* Set to a reasonable value. Note that this will
-	 * be set to mac80211's value at ath5k_config(). */
-	sc->bintval = 1000;
 	ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
 
 	ret = 0;
@@ -1196,8 +1196,8 @@ void ath_radio_disable(struct ath_softc *sc)
 
 	ath9k_hw_phy_disable(ah);
 	ath9k_hw_configpcipowersave(ah, 1);
-	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
 	ath9k_ps_restore(sc);
+	ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
 }
 
 /*******************/
@@ -87,6 +87,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct ath_softc *sc;
 	struct ieee80211_hw *hw;
 	u8 csz;
+	u32 val;
 	int ret = 0;
 	struct ath_hw *ah;

@@ -133,6 +134,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	pci_set_master(pdev);
 
+	/*
+	 * Disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state.
+	 */
+	pci_read_config_dword(pdev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
 	ret = pci_request_region(pdev, 0, "ath9k");
 	if (ret) {
 		dev_err(&pdev->dev, "PCI memory region reserve error\n");

@@ -239,12 +248,21 @@ static int ath_pci_resume(struct pci_dev *pdev)
 	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
 	struct ath_wiphy *aphy = hw->priv;
 	struct ath_softc *sc = aphy->sc;
+	u32 val;
 	int err;
 
 	err = pci_enable_device(pdev);
 	if (err)
 		return err;
 	pci_restore_state(pdev);
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state
+	 */
+	pci_read_config_dword(pdev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
 
 	/* Enable LED */
 	ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
@@ -539,11 +539,14 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
 	if (ath_beacon_dtim_pending_cab(skb)) {
 		/*
 		 * Remain awake waiting for buffered broadcast/multicast
-		 * frames.
+		 * frames. If the last broadcast/multicast frame is not
+		 * received properly, the next beacon frame will work as
+		 * a backup trigger for returning into NETWORK SLEEP state,
+		 * so we are waiting for it as well.
 		 */
 		DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating "
 			"buffered broadcast/multicast frame(s)\n");
-		sc->sc_flags |= SC_OP_WAIT_FOR_CAB;
+		sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
 		return;
 	}
@@ -288,6 +288,7 @@ struct iwm_priv {
 	u8 *eeprom;
 	struct timer_list watchdog;
 	struct work_struct reset_worker;
+	struct mutex mutex;
 	struct rfkill *rfkill;
 
 	char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));

@@ -315,8 +316,11 @@ extern const struct iw_handler_def iwm_iw_handler_def;
 void *iwm_if_alloc(int sizeof_bus, struct device *dev,
 		   struct iwm_if_ops *if_ops);
 void iwm_if_free(struct iwm_priv *iwm);
+int iwm_if_add(struct iwm_priv *iwm);
+void iwm_if_remove(struct iwm_priv *iwm);
 int iwm_mode_to_nl80211_iftype(int mode);
 int iwm_priv_init(struct iwm_priv *iwm);
+void iwm_priv_deinit(struct iwm_priv *iwm);
 void iwm_reset(struct iwm_priv *iwm);
 void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
 			      struct iwm_umac_notif_alive *alive);
@@ -112,6 +112,9 @@ static void iwm_statistics_request(struct work_struct *work)
 	iwm_send_umac_stats_req(iwm, 0);
 }
 
+int __iwm_up(struct iwm_priv *iwm);
+int __iwm_down(struct iwm_priv *iwm);
+
 static void iwm_reset_worker(struct work_struct *work)
 {
 	struct iwm_priv *iwm;

@@ -120,6 +123,19 @@ static void iwm_reset_worker(struct work_struct *work)
 
 	iwm = container_of(work, struct iwm_priv, reset_worker);
 
+	/*
+	 * XXX: The iwm->mutex is introduced purely for this reset work,
+	 * because the other users for iwm_up and iwm_down are only netdev
+	 * ndo_open and ndo_stop which are already protected by rtnl.
+	 * Please remove iwm->mutex together if iwm_reset_worker() is not
+	 * required in the future.
+	 */
+	if (!mutex_trylock(&iwm->mutex)) {
+		IWM_WARN(iwm, "We are in the middle of interface bringing "
+			 "UP/DOWN. Skip driver resetting.\n");
+		return;
+	}
+
 	if (iwm->umac_profile_active) {
 		profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
 		if (profile)

@@ -128,10 +144,10 @@ static void iwm_reset_worker(struct work_struct *work)
 			IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
 	}
 
-	iwm_down(iwm);
+	__iwm_down(iwm);
 
 	while (retry++ < 3) {
-		ret = iwm_up(iwm);
+		ret = __iwm_up(iwm);
 		if (!ret)
 			break;

@@ -142,7 +158,7 @@ static void iwm_reset_worker(struct work_struct *work)
 		IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
 
 		kfree(profile);
-		return;
+		goto out;
 	}

@@ -151,6 +167,9 @@ static void iwm_reset_worker(struct work_struct *work)
 		iwm_send_mlme_profile(iwm);
 		kfree(profile);
 	}
+
+ out:
+	mutex_unlock(&iwm->mutex);
 }
 
 static void iwm_watchdog(unsigned long data)

@@ -215,10 +234,21 @@ int iwm_priv_init(struct iwm_priv *iwm)
 	init_timer(&iwm->watchdog);
 	iwm->watchdog.function = iwm_watchdog;
 	iwm->watchdog.data = (unsigned long)iwm;
+	mutex_init(&iwm->mutex);
 
 	return 0;
 }
 
+void iwm_priv_deinit(struct iwm_priv *iwm)
+{
+	int i;
+
+	for (i = 0; i < IWM_TX_QUEUES; i++)
+		destroy_workqueue(iwm->txq[i].wq);
+
+	destroy_workqueue(iwm->rx_wq);
+}
+
 /*
  * We reset all the structures, and we reset the UMAC.
  * After calling this routine, you're expected to reload

@@ -466,7 +496,7 @@ void iwm_link_off(struct iwm_priv *iwm)
 
 	iwm_rx_free(iwm);
 
-	cancel_delayed_work(&iwm->stats_request);
+	cancel_delayed_work_sync(&iwm->stats_request);
 	memset(wstats, 0, sizeof(struct iw_statistics));
 	wstats->qual.updated = IW_QUAL_ALL_INVALID;

@@ -511,7 +541,7 @@ static int iwm_channels_init(struct iwm_priv *iwm)
 	return 0;
 }
 
-int iwm_up(struct iwm_priv *iwm)
+int __iwm_up(struct iwm_priv *iwm)
 {
 	int ret;
 	struct iwm_notif *notif_reboot, *notif_ack = NULL;

@@ -647,7 +677,18 @@ int __iwm_up(struct iwm_priv *iwm)
 	return -EIO;
 }
 
-int iwm_down(struct iwm_priv *iwm)
+int iwm_up(struct iwm_priv *iwm)
+{
+	int ret;
+
+	mutex_lock(&iwm->mutex);
+	ret = __iwm_up(iwm);
+	mutex_unlock(&iwm->mutex);
+
+	return ret;
+}
+
+int __iwm_down(struct iwm_priv *iwm)
 {
 	int ret;

@@ -678,3 +719,14 @@ int __iwm_down(struct iwm_priv *iwm)
 
 	return 0;
 }
+
+int iwm_down(struct iwm_priv *iwm)
+{
+	int ret;
+
+	mutex_lock(&iwm->mutex);
+	ret = __iwm_down(iwm);
+	mutex_unlock(&iwm->mutex);
+
+	return ret;
+}
@@ -114,32 +114,31 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
 	iwm = wdev_to_iwm(wdev);
 	iwm->bus_ops = if_ops;
 	iwm->wdev = wdev;
-	iwm_priv_init(iwm);
+
+	ret = iwm_priv_init(iwm);
+	if (ret) {
+		dev_err(dev, "failed to init iwm_priv\n");
+		goto out_wdev;
+	}
+
 	wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
 
-	ndev = alloc_netdev_mq(0, "wlan%d", ether_setup,
-			       IWM_TX_QUEUES);
+	ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
 	if (!ndev) {
 		dev_err(dev, "no memory for network device instance\n");
-		goto out_wdev;
+		goto out_priv;
 	}
 
 	ndev->netdev_ops = &iwm_netdev_ops;
 	ndev->wireless_handlers = &iwm_iw_handler_def;
 	ndev->ieee80211_ptr = wdev;
 	SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
-	ret = register_netdev(ndev);
-	if (ret < 0) {
-		dev_err(dev, "Failed to register netdev: %d\n", ret);
-		goto out_ndev;
-	}
-
 	wdev->netdev = ndev;
 
 	return iwm;
 
  out_ndev:
 	free_netdev(ndev);
+ out_priv:
+	iwm_priv_deinit(iwm);
 
  out_wdev:
 	iwm_wdev_free(iwm);

@@ -148,15 +147,29 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
 
 void iwm_if_free(struct iwm_priv *iwm)
 {
-	int i;
-
 	if (!iwm_to_ndev(iwm))
 		return;
 
-	unregister_netdev(iwm_to_ndev(iwm));
 	free_netdev(iwm_to_ndev(iwm));
 	iwm_wdev_free(iwm);
-	destroy_workqueue(iwm->rx_wq);
-	for (i = 0; i < IWM_TX_QUEUES; i++)
-		destroy_workqueue(iwm->txq[i].wq);
+	iwm_priv_deinit(iwm);
+}
+
+int iwm_if_add(struct iwm_priv *iwm)
+{
+	struct net_device *ndev = iwm_to_ndev(iwm);
+	int ret;
+
+	ret = register_netdev(ndev);
+	if (ret < 0) {
+		dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void iwm_if_remove(struct iwm_priv *iwm)
+{
+	unregister_netdev(iwm_to_ndev(iwm));
 }
@@ -454,10 +454,18 @@ static int iwm_sdio_probe(struct sdio_func *func,
 
 	INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
 
+	ret = iwm_if_add(iwm);
+	if (ret) {
+		dev_err(dev, "add SDIO interface failed\n");
+		goto destroy_wq;
+	}
+
 	dev_info(dev, "IWM SDIO probe\n");
 
 	return 0;
 
+ destroy_wq:
+	destroy_workqueue(hw->isr_wq);
  debugfs_exit:
 	iwm_debugfs_exit(iwm);
  if_free:

@@ -471,9 +479,10 @@ static void iwm_sdio_remove(struct sdio_func *func)
 	struct iwm_priv *iwm = hw_to_iwm(hw);
 	struct device *dev = &func->dev;
 
-	destroy_workqueue(hw->isr_wq);
+	iwm_if_remove(iwm);
 	iwm_debugfs_exit(iwm);
 	iwm_if_free(iwm);
+	destroy_workqueue(hw->isr_wq);
 
 	sdio_set_drvdata(func, NULL);
@@ -67,6 +67,7 @@ static struct usb_device_id usb_ids[] = {
 	{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x050d, 0x705c), .driver_info = DEVICE_ZD1211B },
+	{ USB_DEVICE(0x083a, 0xe503), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x083a, 0xe506), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
@@ -958,12 +958,12 @@ static void acer_rfkill_update(struct work_struct *ignored)
 
 	status = get_u32(&state, ACER_CAP_WIRELESS);
 	if (ACPI_SUCCESS(status))
-		rfkill_set_sw_state(wireless_rfkill, !!state);
+		rfkill_set_sw_state(wireless_rfkill, !state);
 
 	if (has_cap(ACER_CAP_BLUETOOTH)) {
 		status = get_u32(&state, ACER_CAP_BLUETOOTH);
 		if (ACPI_SUCCESS(status))
-			rfkill_set_sw_state(bluetooth_rfkill, !!state);
+			rfkill_set_sw_state(bluetooth_rfkill, !state);
 	}
 
 	schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ));
@@ -180,6 +180,7 @@ static struct key_entry eeepc_keymap[] = {
  */
 static int eeepc_hotk_add(struct acpi_device *device);
 static int eeepc_hotk_remove(struct acpi_device *device, int type);
+static int eeepc_hotk_resume(struct acpi_device *device);
 
 static const struct acpi_device_id eeepc_device_ids[] = {
 	{EEEPC_HOTK_HID, 0},

@@ -194,6 +195,7 @@ static struct acpi_driver eeepc_hotk_driver = {
 	.ops = {
 		.add = eeepc_hotk_add,
 		.remove = eeepc_hotk_remove,
+		.resume = eeepc_hotk_resume,
 	},
 };

@@ -512,15 +514,12 @@ static int notify_brn(void)
 	return -1;
 }
 
-static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+static void eeepc_rfkill_hotplug(void)
 {
 	struct pci_dev *dev;
 	struct pci_bus *bus = pci_find_bus(0, 1);
 	bool blocked;
 
-	if (event != ACPI_NOTIFY_BUS_CHECK)
-		return;
-
 	if (!bus) {
 		printk(EEEPC_WARNING "Unable to find PCI bus 1?\n");
 		return;

@@ -551,6 +550,14 @@ static void eeepc_rfkill_hotplug(void)
 	rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill, blocked);
 }
 
+static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
+{
+	if (event != ACPI_NOTIFY_BUS_CHECK)
+		return;
+
+	eeepc_rfkill_hotplug();
+}
+
 static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
 {
 	static struct key_entry *key;

@@ -675,8 +682,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
 	if (!ehotk->eeepc_wlan_rfkill)
 		goto wlan_fail;
 
-	rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
-			    get_acpi(CM_ASL_WLAN) != 1);
+	rfkill_init_sw_state(ehotk->eeepc_wlan_rfkill,
+			     get_acpi(CM_ASL_WLAN) != 1);
 	result = rfkill_register(ehotk->eeepc_wlan_rfkill);
 	if (result)
 		goto wlan_fail;

@@ -693,8 +700,8 @@ static int eeepc_hotk_add(struct acpi_device *device)
 	if (!ehotk->eeepc_bluetooth_rfkill)
 		goto bluetooth_fail;
 
-	rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
-			    get_acpi(CM_ASL_BLUETOOTH) != 1);
+	rfkill_init_sw_state(ehotk->eeepc_bluetooth_rfkill,
+			     get_acpi(CM_ASL_BLUETOOTH) != 1);
 	result = rfkill_register(ehotk->eeepc_bluetooth_rfkill);
 	if (result)
 		goto bluetooth_fail;

@@ -734,6 +741,33 @@ static int eeepc_hotk_remove(struct acpi_device *device, int type)
 	return 0;
 }
 
+static int eeepc_hotk_resume(struct acpi_device *device)
+{
+	if (ehotk->eeepc_wlan_rfkill) {
+		bool wlan;
+
+		/* Workaround - it seems that _PTS disables the wireless
+		   without notification or changing the value read by WLAN.
+		   Normally this is fine because the correct value is restored
+		   from the non-volatile storage on resume, but we need to do
+		   it ourself if case suspend is aborted, or we lose wireless.
+		 */
+		wlan = get_acpi(CM_ASL_WLAN);
+		set_acpi(CM_ASL_WLAN, wlan);
+
+		rfkill_set_sw_state(ehotk->eeepc_wlan_rfkill,
+				    wlan != 1);
+
+		eeepc_rfkill_hotplug();
+	}
+
+	if (ehotk->eeepc_bluetooth_rfkill)
+		rfkill_set_sw_state(ehotk->eeepc_bluetooth_rfkill,
+				    get_acpi(CM_ASL_BLUETOOTH) != 1);
+
+	return 0;
+}
+
 /*
  * Hwmon
  */
@@ -1163,8 +1163,8 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
 {
 	struct tpacpi_rfk *atp_rfk;
 	int res;
-	bool initial_sw_state = false;
-	int initial_sw_status;
+	bool sw_state = false;
+	int sw_status;
 
 	BUG_ON(id >= TPACPI_RFK_SW_MAX || tpacpi_rfkill_switches[id]);

@@ -1185,17 +1185,17 @@ static int __init tpacpi_new_rfkill(const enum tpacpi_rfk_id id,
 	atp_rfk->id = id;
 	atp_rfk->ops = tp_rfkops;
 
-	initial_sw_status = (tp_rfkops->get_status)();
-	if (initial_sw_status < 0) {
+	sw_status = (tp_rfkops->get_status)();
+	if (sw_status < 0) {
 		printk(TPACPI_ERR
 			"failed to read initial state for %s, error %d\n",
-			name, initial_sw_status);
+			name, sw_status);
 	} else {
-		initial_sw_state = (initial_sw_status == TPACPI_RFK_RADIO_OFF);
+		sw_state = (sw_status == TPACPI_RFK_RADIO_OFF);
 		if (set_default) {
 			/* try to keep the initial state, since we ask the
 			 * firmware to preserve it across S5 in NVRAM */
-			rfkill_set_sw_state(atp_rfk->rfkill, initial_sw_state);
+			rfkill_init_sw_state(atp_rfk->rfkill, sw_state);
 		}
 	}
 	rfkill_set_hw_state(atp_rfk->rfkill, tpacpi_rfk_check_hwblock_state());
@@ -160,8 +160,9 @@ struct rfkill * __must_check rfkill_alloc(const char *name,
 * the rfkill structure. Before calling this function the driver needs
 * to be ready to service method calls from rfkill.
 *
- * If the software blocked state is not set before registration,
- * set_block will be called to initialize it to a default value.
+ * If rfkill_init_sw_state() is not called before registration,
+ * set_block() will be called to initialize the software blocked state
+ * to a default value.
 *
 * If the hardware blocked state is not set before registration,
 * it is assumed to be unblocked.

@@ -234,9 +235,11 @@ bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
 * rfkill drivers that get events when the soft-blocked state changes
 * (yes, some platforms directly act on input but allow changing again)
 * use this function to notify the rfkill core (and through that also
- * userspace) of the current state. It is not necessary to notify on
- * resume; since hibernation can always change the soft-blocked state,
- * the rfkill core will unconditionally restore the previous state.
+ * userspace) of the current state.
+ *
+ * Drivers should also call this function after resume if the state has
+ * been changed by the user. This only makes sense for "persistent"
+ * devices (see rfkill_init_sw_state()).
 *
 * This function can be called in any context, even from within rfkill
 * callbacks.

@@ -246,6 +249,22 @@ bool __must_check rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
 */
 bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
 
+/**
+ * rfkill_init_sw_state - Initialize persistent software block state
+ * @rfkill: pointer to the rfkill class to modify.
+ * @state: the current software block state to set
+ *
+ * rfkill drivers that preserve their software block state over power off
+ * use this function to notify the rfkill core (and through that also
+ * userspace) of their initial state. It should only be used before
+ * registration.
+ *
+ * In addition, it marks the device as "persistent", an attribute which
+ * can be read by userspace. Persistent devices are expected to preserve
+ * their own state when suspended.
+ */
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
+
 /**
  * rfkill_set_states - Set the internal rfkill block states
  * @rfkill: pointer to the rfkill class to modify.

@@ -307,6 +326,10 @@ static inline bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
 	return blocked;
 }
 
+static inline void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+}
+
 static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
 {
 }
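The hunks above define the new driver-facing rfkill API. As a quick orientation (not part of the diff), here is a minimal sketch of how a driver whose firmware keeps the radio state in non-volatile storage might use it; the foo_* names and the blocked_in_nvram flag are hypothetical, only the rfkill_* calls come from the header above:

	#include <linux/rfkill.h>

	/* Hypothetical driver context; only the rfkill calls are real API. */
	struct foo_priv {
		struct rfkill *rfkill;
	};

	static int foo_set_block(void *data, bool blocked)
	{
		/* program the hardware radio on or off here */
		return 0;
	}

	static const struct rfkill_ops foo_rfkill_ops = {
		.set_block = foo_set_block,
	};

	static int foo_register_rfkill(struct foo_priv *priv, struct device *dev,
				       bool blocked_in_nvram)
	{
		int err;

		priv->rfkill = rfkill_alloc("foo-wlan", dev, RFKILL_TYPE_WLAN,
					    &foo_rfkill_ops, priv);
		if (!priv->rfkill)
			return -ENOMEM;

		/* Report the soft-block state preserved over power off; this
		 * also marks the device "persistent" for userspace, and must
		 * happen before rfkill_register(). */
		rfkill_init_sw_state(priv->rfkill, blocked_in_nvram);

		err = rfkill_register(priv->rfkill);
		if (err)
			rfkill_destroy(priv->rfkill);
		return err;
	}

Calling rfkill_init_sw_state() before registration is what distinguishes a persistent device: per the comments above, the core then leaves the soft-block state to the hardware across suspend instead of replaying it through set_block().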
@@ -94,8 +94,6 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
 			    poll_table *wait);
 void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
 void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
-int  iucv_sock_wait_state(struct sock *sk, int state, int state2,
-			  unsigned long timeo);
 int  iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
 void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
 void iucv_accept_unlink(struct sock *sk);
@@ -39,14 +39,6 @@
 
 #include "af802154.h"
 
-#define DBG_DUMP(data, len) { \
-	int i; \
-	pr_debug("function: %s: data: len %d:\n", __func__, len); \
-	for (i = 0; i < len; i++) {\
-		pr_debug("%02x: %02x\n", i, (data)[i]); \
-	} \
-}
-
 /*
  * Utility function for families
  */

@@ -302,10 +294,12 @@ static struct net_proto_family ieee802154_family_ops = {
 static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct packet_type *pt, struct net_device *orig_dev)
 {
-	DBG_DUMP(skb->data, skb->len);
 	if (!netif_running(dev))
 		return -ENODEV;
 	pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
+#ifdef DEBUG
+	print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+#endif
 
 	if (!net_eq(dev_net(dev), &init_net))
 		goto drop;
@@ -1085,8 +1085,16 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt,
 	now = jiffies;
 
 	if (!rt_caching(dev_net(rt->u.dst.dev))) {
-		rt_drop(rt);
-		return 0;
+		/*
+		 * If we're not caching, just tell the caller we
+		 * were successful and don't touch the route. The
+		 * caller hold the sole reference to the cache entry, and
+		 * it will be released when the caller is done with it.
+		 * If we drop it here, the callers have no way to resolve routes
+		 * when we're not caching. Instead, just point *rp at rt, so
+		 * the caller gets a single use out of the route
+		 */
+		goto report_and_exit;
 	}
 
 	rthp = &rt_hash_table[hash].chain;

@@ -1217,6 +1225,8 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt,
 	rcu_assign_pointer(rt_hash_table[hash].chain, rt);
 
 	spin_unlock_bh(rt_hash_lock_addr(hash));
+
+report_and_exit:
 	if (rp)
 		*rp = rt;
 	else
@@ -54,6 +54,38 @@ static const u8 iprm_shutdown[8] =
 #define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg target class */
 #define CB_TRGCLS_LEN	(TRGCLS_SIZE)
 
+#define __iucv_sock_wait(sk, condition, timeo, ret)			\
+do {									\
+	DEFINE_WAIT(__wait);						\
+	long __timeo = timeo;						\
+	ret = 0;							\
+	while (!(condition)) {						\
+		prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
+		if (!__timeo) {						\
+			ret = -EAGAIN;					\
+			break;						\
+		}							\
+		if (signal_pending(current)) {				\
+			ret = sock_intr_errno(__timeo);			\
+			break;						\
+		}							\
+		release_sock(sk);					\
+		__timeo = schedule_timeout(__timeo);			\
+		lock_sock(sk);						\
+		ret = sock_error(sk);					\
+		if (ret)						\
+			break;						\
+	}								\
+	finish_wait(sk->sk_sleep, &__wait);				\
+} while (0)
+
+#define iucv_sock_wait(sk, condition, timeo)				\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__iucv_sock_wait(sk, condition, timeo, __ret);		\
+	__ret;								\
+})
+
 static void iucv_sock_kill(struct sock *sk);
 static void iucv_sock_close(struct sock *sk);

@@ -238,6 +270,48 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
 	return msg->length;
 }
 
+/**
+ * iucv_sock_in_state() - check for specific states
+ * @sk:		sock structure
+ * @state:	first iucv sk state
+ * @state:	second iucv sk state
+ *
+ * Returns true if the socket in either in the first or second state.
+ */
+static int iucv_sock_in_state(struct sock *sk, int state, int state2)
+{
+	return (sk->sk_state == state || sk->sk_state == state2);
+}
+
+/**
+ * iucv_below_msglim() - function to check if messages can be sent
+ * @sk:		sock structure
+ *
+ * Returns true if the send queue length is lower than the message limit.
+ * Always returns true if the socket is not connected (no iucv path for
+ * checking the message limit).
+ */
+static inline int iucv_below_msglim(struct sock *sk)
+{
+	struct iucv_sock *iucv = iucv_sk(sk);
+
+	if (sk->sk_state != IUCV_CONNECTED)
+		return 1;
+	return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+}
+
+/**
+ * iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
+ */
+static void iucv_sock_wake_msglim(struct sock *sk)
+{
+	read_lock(&sk->sk_callback_lock);
+	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+		wake_up_interruptible_all(sk->sk_sleep);
+	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+	read_unlock(&sk->sk_callback_lock);
+}
+
 /* Timers */
 static void iucv_sock_timeout(unsigned long arg)
 {

@@ -329,7 +403,9 @@ static void iucv_sock_close(struct sock *sk)
 			timeo = sk->sk_lingertime;
 		else
 			timeo = IUCV_DISCONN_TIMEOUT;
-		err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
+		err = iucv_sock_wait(sk,
+				     iucv_sock_in_state(sk, IUCV_CLOSED, 0),
+				     timeo);
 	}
 
 	case IUCV_CLOSING:   /* fall through */

@@ -510,39 +586,6 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
 	return NULL;
 }
 
-int iucv_sock_wait_state(struct sock *sk, int state, int state2,
-			 unsigned long timeo)
-{
-	DECLARE_WAITQUEUE(wait, current);
-	int err = 0;
-
-	add_wait_queue(sk->sk_sleep, &wait);
-	while (sk->sk_state != state && sk->sk_state != state2) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (!timeo) {
-			err = -EAGAIN;
-			break;
-		}
-
-		if (signal_pending(current)) {
-			err = sock_intr_errno(timeo);
-			break;
-		}
-
-		release_sock(sk);
-		timeo = schedule_timeout(timeo);
-		lock_sock(sk);
-
-		err = sock_error(sk);
-		if (err)
-			break;
-	}
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
-	return err;
-}
-
 /* Bind an unbound socket */
 static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
 			  int addr_len)

@@ -687,8 +730,9 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
 	}
 
 	if (sk->sk_state != IUCV_CONNECTED) {
-		err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
-				sock_sndtimeo(sk, flags & O_NONBLOCK));
+		err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
+							    IUCV_DISCONN),
+				     sock_sndtimeo(sk, flags & O_NONBLOCK));
 	}
 
 	if (sk->sk_state == IUCV_DISCONN) {

@@ -842,9 +886,11 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	struct iucv_message txmsg;
 	struct cmsghdr *cmsg;
 	int cmsg_done;
+	long timeo;
 	char user_id[9];
 	char appl_id[9];
 	int err;
+	int noblock = msg->msg_flags & MSG_DONTWAIT;
 
 	err = sock_error(sk);
 	if (err)

@@ -864,108 +910,119 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 		goto out;
 	}
 
-	if (sk->sk_state == IUCV_CONNECTED) {
-		/* initialize defaults */
-		cmsg_done   = 0;	/* check for duplicate headers */
-		txmsg.class = 0;
-
-		/* iterate over control messages */
-		for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
-			cmsg = CMSG_NXTHDR(msg, cmsg)) {
-
-			if (!CMSG_OK(msg, cmsg)) {
-				err = -EINVAL;
-				goto out;
-			}
-
-			if (cmsg->cmsg_level != SOL_IUCV)
-				continue;
-
-			if (cmsg->cmsg_type & cmsg_done) {
-				err = -EINVAL;
-				goto out;
-			}
-			cmsg_done |= cmsg->cmsg_type;
-
-			switch (cmsg->cmsg_type) {
-			case SCM_IUCV_TRGCLS:
-				if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
-					err = -EINVAL;
-					goto out;
-				}
-
-				/* set iucv message target class */
-				memcpy(&txmsg.class,
-					(void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
-
-				break;
-
-			default:
-				err = -EINVAL;
-				goto out;
-				break;
-			}
-		}
-
-		/* allocate one skb for each iucv message:
-		 * this is fine for SOCK_SEQPACKET (unless we want to support
-		 * segmented records using the MSG_EOR flag), but
-		 * for SOCK_STREAM we might want to improve it in future */
-		if (!(skb = sock_alloc_send_skb(sk, len,
-						msg->msg_flags & MSG_DONTWAIT,
-						&err)))
-			goto out;
-
-		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
-			err = -EFAULT;
-			goto fail;
-		}
-
-		/* increment and save iucv message tag for msg_completion cbk */
-		txmsg.tag = iucv->send_tag++;
-		memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
-		skb_queue_tail(&iucv->send_skb_q, skb);
-
-		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
-		      && skb->len <= 7) {
-			err = iucv_send_iprm(iucv->path, &txmsg, skb);
-
-			/* on success: there is no message_complete callback
-			 * for an IPRMDATA msg; remove skb from send queue */
-			if (err == 0) {
-				skb_unlink(skb, &iucv->send_skb_q);
-				kfree_skb(skb);
-			}
-
-			/* this error should never happen since the
-			 * IUCV_IPRMDATA path flag is set... sever path */
-			if (err == 0x15) {
-				iucv_path_sever(iucv->path, NULL);
-				skb_unlink(skb, &iucv->send_skb_q);
-				err = -EPIPE;
-				goto fail;
-			}
-		} else
-			err = iucv_message_send(iucv->path, &txmsg, 0, 0,
-						(void *) skb->data, skb->len);
-		if (err) {
-			if (err == 3) {
-				user_id[8] = 0;
-				memcpy(user_id, iucv->dst_user_id, 8);
-				appl_id[8] = 0;
-				memcpy(appl_id, iucv->dst_name, 8);
-				pr_err("Application %s on z/VM guest %s"
-				       " exceeds message limit\n",
-				       user_id, appl_id);
-			}
-			skb_unlink(skb, &iucv->send_skb_q);
-			goto fail;
-		}
-	} else {
-		err = -ENOTCONN;
-		goto out;
-	}
+	/* Return if the socket is not in connected state */
+	if (sk->sk_state != IUCV_CONNECTED) {
+		err = -ENOTCONN;
+		goto out;
+	}
+
+	/* initialize defaults */
+	cmsg_done   = 0;	/* check for duplicate headers */
+	txmsg.class = 0;
+
+	/* iterate over control messages */
+	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
+	     cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+		if (!CMSG_OK(msg, cmsg)) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		if (cmsg->cmsg_level != SOL_IUCV)
+			continue;
+
+		if (cmsg->cmsg_type & cmsg_done) {
+			err = -EINVAL;
+			goto out;
+		}
+		cmsg_done |= cmsg->cmsg_type;
+
+		switch (cmsg->cmsg_type) {
+		case SCM_IUCV_TRGCLS:
+			if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) {
+				err = -EINVAL;
+				goto out;
+			}
+
+			/* set iucv message target class */
+			memcpy(&txmsg.class,
+			       (void *) CMSG_DATA(cmsg), TRGCLS_SIZE);
+
+			break;
+
+		default:
+			err = -EINVAL;
+			goto out;
+			break;
+		}
+	}
+
+	/* allocate one skb for each iucv message:
+	 * this is fine for SOCK_SEQPACKET (unless we want to support
+	 * segmented records using the MSG_EOR flag), but
+	 * for SOCK_STREAM we might want to improve it in future */
+	skb = sock_alloc_send_skb(sk, len, noblock, &err);
+	if (!skb)
+		goto out;
+	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
+		err = -EFAULT;
+		goto fail;
+	}
+
+	/* wait if outstanding messages for iucv path has reached */
+	timeo = sock_sndtimeo(sk, noblock);
+	err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
+	if (err)
+		goto fail;
+
+	/* return -ECONNRESET if the socket is no longer connected */
+	if (sk->sk_state != IUCV_CONNECTED) {
+		err = -ECONNRESET;
+		goto fail;
+	}
+
+	/* increment and save iucv message tag for msg_completion cbk */
+	txmsg.tag = iucv->send_tag++;
+	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN);
+	skb_queue_tail(&iucv->send_skb_q, skb);
+
+	if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags)
+	      && skb->len <= 7) {
+		err = iucv_send_iprm(iucv->path, &txmsg, skb);
+
+		/* on success: there is no message_complete callback
+		 * for an IPRMDATA msg; remove skb from send queue */
+		if (err == 0) {
+			skb_unlink(skb, &iucv->send_skb_q);
+			kfree_skb(skb);
+		}
+
+		/* this error should never happen since the
+		 * IUCV_IPRMDATA path flag is set... sever path */
+		if (err == 0x15) {
+			iucv_path_sever(iucv->path, NULL);
+			skb_unlink(skb, &iucv->send_skb_q);
+			err = -EPIPE;
+			goto fail;
+		}
+	} else
+		err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+					(void *) skb->data, skb->len);
+	if (err) {
+		if (err == 3) {
+			user_id[8] = 0;
+			memcpy(user_id, iucv->dst_user_id, 8);
+			appl_id[8] = 0;
+			memcpy(appl_id, iucv->dst_name, 8);
+			pr_err("Application %s on z/VM guest %s"
+			       " exceeds message limit\n",
+			       appl_id, user_id);
+			err = -EAGAIN;
+		} else
+			err = -EPIPE;
+		skb_unlink(skb, &iucv->send_skb_q);
+		goto fail;
+	}
 
 	release_sock(sk);

@@ -1581,7 +1638,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
 
 		spin_unlock_irqrestore(&list->lock, flags);
 
-		kfree_skb(this);
+		if (this) {
+			kfree_skb(this);
+			/* wake up any process waiting for sending */
+			iucv_sock_wake_msglim(sk);
+		}
 	}
 	BUG_ON(!this);
@@ -56,7 +56,6 @@ struct rfkill {
 	u32			idx;
 
 	bool			registered;
-	bool			suspended;
 	bool			persistent;
 
 	const struct rfkill_ops	*ops;

@@ -224,7 +223,7 @@ static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
 
 static void rfkill_event(struct rfkill *rfkill)
 {
-	if (!rfkill->registered || rfkill->suspended)
+	if (!rfkill->registered)
 		return;
 
 	kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE);

@@ -270,6 +269,9 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
 	unsigned long flags;
 	int err;
 
+	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+		return;
+
 	/*
 	 * Some platforms (...!) generate input events which affect the
 	 * _hard_ kill state -- whenever something tries to change the

@@ -292,9 +294,6 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
 	rfkill->state |= RFKILL_BLOCK_SW_SETCALL;
 	spin_unlock_irqrestore(&rfkill->lock, flags);
 
-	if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
-		return;
-
 	err = rfkill->ops->set_block(rfkill->data, blocked);
 
 	spin_lock_irqsave(&rfkill->lock, flags);

@@ -508,19 +507,32 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
 	blocked = blocked || hwblock;
 	spin_unlock_irqrestore(&rfkill->lock, flags);
 
-	if (!rfkill->registered) {
-		rfkill->persistent = true;
-	} else {
-		if (prev != blocked && !hwblock)
-			schedule_work(&rfkill->uevent_work);
+	if (!rfkill->registered)
+		return blocked;
 
-		rfkill_led_trigger_event(rfkill);
-	}
+	if (prev != blocked && !hwblock)
+		schedule_work(&rfkill->uevent_work);
+
+	rfkill_led_trigger_event(rfkill);
 
 	return blocked;
 }
 EXPORT_SYMBOL(rfkill_set_sw_state);
 
+void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked)
+{
+	unsigned long flags;
+
+	BUG_ON(!rfkill);
+	BUG_ON(rfkill->registered);
+
+	spin_lock_irqsave(&rfkill->lock, flags);
+	__rfkill_set_sw_state(rfkill, blocked);
+	rfkill->persistent = true;
+	spin_unlock_irqrestore(&rfkill->lock, flags);
+}
+EXPORT_SYMBOL(rfkill_init_sw_state);
+
 void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
 {
 	unsigned long flags;

@@ -598,6 +610,15 @@ static ssize_t rfkill_idx_show(struct device *dev,
 	return sprintf(buf, "%d\n", rfkill->idx);
 }
 
+static ssize_t rfkill_persistent_show(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct rfkill *rfkill = to_rfkill(dev);
+
+	return sprintf(buf, "%d\n", rfkill->persistent);
+}
+
 static u8 user_state_from_blocked(unsigned long state)
 {
 	if (state & RFKILL_BLOCK_HW)

@@ -656,6 +677,7 @@ static struct device_attribute rfkill_dev_attrs[] = {
 	__ATTR(name, S_IRUGO, rfkill_name_show, NULL),
 	__ATTR(type, S_IRUGO, rfkill_type_show, NULL),
 	__ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
+	__ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
 	__ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
 	__ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
 	__ATTR_NULL

@@ -718,8 +740,6 @@ static int rfkill_suspend(struct device *dev, pm_message_t state)
 
 	rfkill_pause_polling(rfkill);
 
-	rfkill->suspended = true;
-
 	return 0;
 }

@@ -728,10 +748,10 @@ static int rfkill_resume(struct device *dev)
 	struct rfkill *rfkill = to_rfkill(dev);
 	bool cur;
 
-	cur = !!(rfkill->state & RFKILL_BLOCK_SW);
-	rfkill_set_block(rfkill, cur);
-
-	rfkill->suspended = false;
+	if (!rfkill->persistent) {
+		cur = !!(rfkill->state & RFKILL_BLOCK_SW);
+		rfkill_set_block(rfkill, cur);
+	}
 
 	rfkill_resume_polling(rfkill);
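Because "persistent" is exported as an ordinary sysfs attribute alongside name, type, index and state, userspace needs no new interface to query it. A minimal sketch (not part of the diff, assuming the standard /sys/class/rfkill/rfkillN layout and that rfkill0 exists):

	#include <stdio.h>

	int main(void)
	{
		/* rfkill0 is just an example index */
		FILE *f = fopen("/sys/class/rfkill/rfkill0/persistent", "r");
		int persistent;

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%d", &persistent) == 1)
			printf("rfkill0 %s its soft-block state itself\n",
			       persistent ? "preserves" : "does not preserve");
		fclose(f);
		return 0;
	}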
@@ -1687,13 +1687,52 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto out_rtnl;
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+	err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+	if (err)
+		goto out;
+
+	/* validate settings */
+	err = 0;
+
+	switch (dev->ieee80211_ptr->iftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+		/* disallow mesh-specific things */
+		if (params.plink_action)
+			err = -EINVAL;
+		break;
+	case NL80211_IFTYPE_STATION:
+		/* disallow everything but AUTHORIZED flag */
+		if (params.plink_action)
+			err = -EINVAL;
+		if (params.vlan)
+			err = -EINVAL;
+		if (params.supported_rates)
+			err = -EINVAL;
+		if (params.ht_capa)
+			err = -EINVAL;
+		if (params.listen_interval >= 0)
+			err = -EINVAL;
+		if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
+			err = -EINVAL;
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		/* disallow things mesh doesn't support */
+		if (params.vlan)
+			err = -EINVAL;
+		if (params.ht_capa)
+			err = -EINVAL;
+		if (params.listen_interval >= 0)
+			err = -EINVAL;
+		if (params.supported_rates)
+			err = -EINVAL;
+		if (params.sta_flags_mask)
+			err = -EINVAL;
+		break;
+	default:
+		err = -EINVAL;
+	}
+
 	if (err)
 		goto out;

@@ -1728,9 +1767,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 	if (!info->attrs[NL80211_ATTR_MAC])
 		return -EINVAL;
 
-	if (!info->attrs[NL80211_ATTR_STA_AID])
-		return -EINVAL;
-
 	if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
 		return -EINVAL;

@@ -1745,9 +1781,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 	params.listen_interval =
 		nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
 
-	params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
-	if (!params.aid || params.aid > IEEE80211_MAX_AID)
-		return -EINVAL;
+	if (info->attrs[NL80211_ATTR_STA_AID]) {
+		params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
+		if (!params.aid || params.aid > IEEE80211_MAX_AID)
+			return -EINVAL;
+	}
 
 	if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
 		params.ht_capa =

@@ -1762,13 +1800,39 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto out_rtnl;
 
-	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+	err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+	if (err)
+		goto out;
+
+	/* validate settings */
+	err = 0;
+
+	switch (dev->ieee80211_ptr->iftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
+		/* all ok but must have AID */
+		if (!params.aid)
+			err = -EINVAL;
+		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		/* disallow things mesh doesn't support */
+		if (params.vlan)
+			err = -EINVAL;
+		if (params.aid)
+			err = -EINVAL;
+		if (params.ht_capa)
+			err = -EINVAL;
+		if (params.listen_interval >= 0)
+			err = -EINVAL;
+		if (params.supported_rates)
+			err = -EINVAL;
+		if (params.sta_flags_mask)
+			err = -EINVAL;
+		break;
+	default:
+		err = -EINVAL;
+	}
+
 	if (err)
 		goto out;

@@ -1812,7 +1876,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
 		goto out_rtnl;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
 		err = -EINVAL;
 		goto out;
 	}