2007-09-26 08:57:13 +08:00
|
|
|
/*
|
2010-08-07 02:45:38 +08:00
|
|
|
Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
|
|
|
|
Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
|
2007-09-26 08:57:13 +08:00
|
|
|
<http://rt2x00.serialmonkey.com>
|
|
|
|
|
|
|
|
This program is free software; you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
|
|
|
the Free Software Foundation; either version 2 of the License, or
|
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
2013-12-06 19:32:11 +08:00
|
|
|
along with this program; if not, see <http://www.gnu.org/licenses/>.
|
2007-09-26 08:57:13 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
Module: rt2x00usb
|
|
|
|
Abstract: rt2x00 generic usb device routines.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/module.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2007-09-26 08:57:13 +08:00
|
|
|
#include <linux/usb.h>
|
2007-10-27 19:43:29 +08:00
|
|
|
#include <linux/bug.h>
|
2007-09-26 08:57:13 +08:00
|
|
|
|
|
|
|
#include "rt2x00.h"
|
|
|
|
#include "rt2x00usb.h"
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Interfacing with the HW.
|
|
|
|
*/
|
2007-10-27 19:41:25 +08:00
|
|
|
/*
 * Issue a synchronous vendor-specific control request on endpoint 0.
 *
 * Retries the transfer up to REGISTER_BUSY_COUNT times. Returns 0 on
 * success, or the last usb_control_msg() error code on failure.
 * -ENODEV (device unplugged) aborts the retry loop immediately and
 * clears DEVICE_STATE_PRESENT so subsequent callers bail out early.
 */
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
			     const u8 request, const u8 requesttype,
			     const u16 offset, const u16 value,
			     void *buffer, const u16 buffer_length,
			     const int timeout)
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	int status;
	unsigned int i;
	/* Direction of the control pipe follows the request type. */
	unsigned int pipe =
	    (requesttype == USB_VENDOR_REQUEST_IN) ?
	    usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);

	/* Don't even try when the device is already known to be gone. */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		status = usb_control_msg(usb_dev, pipe, request, requesttype,
					 value, offset, buffer, buffer_length,
					 timeout);
		if (status >= 0)
			return 0;

		/*
		 * Check for errors
		 * -ENODEV: Device has disappeared, no point continuing.
		 * All other errors: Try again.
		 */
		else if (status == -ENODEV) {
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
			break;
		}
	}

	/* If the port is powered down, we get a -EPROTO error, and this
	 * leads to a endless loop. So just say that the device is gone.
	 */
	if (status == -EPROTO)
		clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);

	rt2x00_err(rt2x00dev,
		   "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
		   request, offset, status);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);
|
|
|
|
|
2007-10-27 19:43:29 +08:00
|
|
|
/*
 * Vendor request that bounces data through the pre-allocated CSR cache
 * buffer (rt2x00dev->csr.cache), so the caller's buffer need not be
 * DMA-capable. The caller MUST already hold csr_mutex — the cache is a
 * single shared buffer and this function does not lock it itself.
 */
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
				   const u8 request, const u8 requesttype,
				   const u16 offset, void *buffer,
				   const u16 buffer_length, const int timeout)
{
	int status;

	/* Locking precondition: csr_mutex protects the shared cache. */
	BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));

	/*
	 * Check for Cache availability; requests larger than the cache
	 * must be split by the caller (see rt2x00usb_vendor_request_buff).
	 */
	if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
		rt2x00_err(rt2x00dev, "CSR cache not available\n");
		return -ENOMEM;
	}

	/* OUT transfer: stage the caller's data into the cache first. */
	if (requesttype == USB_VENDOR_REQUEST_OUT)
		memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

	status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
					  offset, 0, rt2x00dev->csr.cache,
					  buffer_length, timeout);

	/* IN transfer: copy the received data back only on success. */
	if (!status && requesttype == USB_VENDOR_REQUEST_IN)
		memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);
|
|
|
|
|
|
|
|
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
|
|
|
|
const u8 request, const u8 requesttype,
|
|
|
|
const u16 offset, void *buffer,
|
|
|
|
const u16 buffer_length, const int timeout)
|
2008-07-19 22:16:54 +08:00
|
|
|
{
|
|
|
|
int status = 0;
|
|
|
|
unsigned char *tb;
|
|
|
|
u16 off, len, bsize;
|
|
|
|
|
2008-11-10 06:40:46 +08:00
|
|
|
mutex_lock(&rt2x00dev->csr_mutex);
|
2008-07-19 22:16:54 +08:00
|
|
|
|
2008-08-02 16:31:09 +08:00
|
|
|
tb = (char *)buffer;
|
2008-07-19 22:16:54 +08:00
|
|
|
off = offset;
|
|
|
|
len = buffer_length;
|
|
|
|
while (len && !status) {
|
|
|
|
bsize = min_t(u16, CSR_CACHE_SIZE, len);
|
|
|
|
status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
|
|
|
|
requesttype, off, tb,
|
|
|
|
bsize, timeout);
|
|
|
|
|
|
|
|
tb += bsize;
|
|
|
|
len -= bsize;
|
|
|
|
off += bsize;
|
|
|
|
}
|
|
|
|
|
2008-11-10 06:40:46 +08:00
|
|
|
mutex_unlock(&rt2x00dev->csr_mutex);
|
2008-07-19 22:16:54 +08:00
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
2010-06-03 16:51:51 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
|
2008-07-19 22:16:54 +08:00
|
|
|
|
2008-11-11 02:42:18 +08:00
|
|
|
/*
 * Poll an indirect-access register until its busy bit clears.
 *
 * Return convention (note: NOT 0-on-success):
 *   1        - busy bit cleared, *reg holds the final register value
 *   0        - timed out after REGISTER_BUSY_COUNT polls; *reg is set
 *              to ~0 to mark the value as invalid
 *   -ENODEV  - device is gone
 */
int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
		/* Busy field cleared: the hardware finished the access. */
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		   offset, *reg);
	/* Poison the output so callers cannot mistake it for valid data. */
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);
|
|
|
|
|
2011-04-18 21:29:12 +08:00
|
|
|
|
|
|
|
/*
 * Per-request context for asynchronous register reads. Allocated in
 * rt2x00usb_register_read_async() and freed by the URB completion
 * handler once no resubmission is requested (or submission fails).
 */
struct rt2x00_async_read_data {
	__le32 reg;			/* little-endian register value filled in by the transfer */
	struct usb_ctrlrequest cr;	/* setup packet for the control URB */
	struct rt2x00_dev *rt2x00dev;	/* owning device, passed to the callback */
	/* Returns true to resubmit the URB and read again, false to stop. */
	bool (*callback)(struct rt2x00_dev *, int, u32);
};
|
|
|
|
|
|
|
|
/*
 * Completion handler for asynchronous register reads.
 *
 * Invokes the user callback with the URB status and the (CPU-endian)
 * register value. If the callback returns true the URB is resubmitted
 * to read again; the context is freed when no resubmission happens or
 * when resubmission fails, so rd is never leaked.
 */
static void rt2x00usb_register_read_async_cb(struct urb *urb)
{
	struct rt2x00_async_read_data *rd = urb->context;
	if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
		/* Callback asked for another read; free rd only on failure. */
		if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
			kfree(rd);
	} else
		kfree(rd);
}
|
|
|
|
|
|
|
|
/*
 * Start an asynchronous (non-blocking) read of a single 32-bit register.
 *
 * Safe to call from atomic context (GFP_ATOMIC allocations). The result
 * is delivered to @callback from URB completion; see
 * rt2x00usb_register_read_async_cb() for the resubmit/free protocol.
 * Failures are silent: if allocation or submission fails, the callback
 * is simply never invoked.
 */
void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset,
				   bool (*callback)(struct rt2x00_dev*, int, u32))
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct urb *urb;
	struct rt2x00_async_read_data *rd;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (!rd)
		return;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		kfree(rd);
		return;
	}

	rd->rt2x00dev = rt2x00dev;
	rd->callback = callback;
	/* Build the vendor "multi read" setup packet (wLength = 4 bytes). */
	rd->cr.bRequestType = USB_VENDOR_REQUEST_IN;
	rd->cr.bRequest = USB_MULTI_READ;
	rd->cr.wValue = 0;
	rd->cr.wIndex = cpu_to_le16(offset);
	rd->cr.wLength = cpu_to_le16(sizeof(u32));

	usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			     (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
			     rt2x00usb_register_read_async_cb, rd);
	if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
		kfree(rd);
	/* Drop our reference; the USB core holds its own while submitted. */
	usb_free_urb(urb);
}
EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
|
|
|
|
|
2007-09-26 08:57:13 +08:00
|
|
|
/*
|
|
|
|
* TX data handlers.
|
|
|
|
*/
|
2010-08-07 02:45:38 +08:00
|
|
|
static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
|
2007-09-26 08:57:13 +08:00
|
|
|
{
|
|
|
|
/*
|
2010-08-07 02:45:38 +08:00
|
|
|
* If the transfer to hardware succeeded, it does not mean the
|
2008-05-10 19:42:06 +08:00
|
|
|
* frame was send out correctly. It only means the frame
|
2011-03-31 09:57:33 +08:00
|
|
|
* was successfully pushed to the hardware, we have no
|
2008-05-10 19:42:06 +08:00
|
|
|
* way to determine the transmission status right now.
|
|
|
|
* (Only indirectly by looking at the failed TX counters
|
|
|
|
* in the register).
|
2007-09-26 08:57:13 +08:00
|
|
|
*/
|
2010-08-07 02:45:38 +08:00
|
|
|
if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
|
2010-08-07 02:46:53 +08:00
|
|
|
rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
|
2010-08-07 02:45:38 +08:00
|
|
|
else
|
2010-08-07 02:46:53 +08:00
|
|
|
rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
|
2007-09-26 08:57:13 +08:00
|
|
|
}
|
|
|
|
|
2010-08-07 02:45:38 +08:00
|
|
|
/*
 * Deferred TX-completion worker (rt2x00dev->txdone_work).
 *
 * Walks every TX queue from Q_INDEX_DONE and completes entries whose
 * DMA has finished. An entry still owned by the device, or one whose
 * status is still pending, stops the walk for that queue — completion
 * must happen strictly in order.
 */
static void rt2x00usb_work_txdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, txdone_work);
	struct data_queue *queue;
	struct queue_entry *entry;

	tx_queue_for_each(rt2x00dev, queue) {
		while (!rt2x00queue_empty(queue)) {
			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);

			/* Stop at the first entry not yet ready. */
			if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
			    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
				break;

			rt2x00usb_work_txdone_entry(entry);
		}
	}
}
|
|
|
|
|
|
|
|
/*
 * TX URB completion handler (runs in interrupt context).
 *
 * Marks the entry DMA-done and defers the actual TX status handling to
 * the txdone workqueue — no sleeping work may happen here.
 */
static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/* Entry no longer owned by the device: stale completion, ignore. */
	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;
	/*
	 * Check if the frame was correctly uploaded
	 */
	if (urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
	/*
	 * Report the frame as DMA done
	 */
	rt2x00lib_dmadone(entry);

	/* Give the driver a chance to run its own DMA-done hook. */
	if (rt2x00dev->ops->lib->tx_dma_done)
		rt2x00dev->ops->lib->tx_dma_done(entry);
	/*
	 * Schedule the delayed work for reading the TX status
	 * from the device. Skipped only when the device uses a TX status
	 * FIFO and that FIFO is currently empty (nothing to report yet).
	 */
	if (!test_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags) ||
	    !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}
|
|
|
|
|
2013-03-15 16:57:56 +08:00
|
|
|
/*
 * Submit one pending TX entry to the hardware as a bulk-out URB.
 *
 * Used as a rt2x00queue_for_each_entry() callback; always returns false
 * so iteration over the queue continues. On any failure the entry is
 * flagged ENTRY_DATA_IO_FAILED and immediately reported DMA-done so it
 * can be reaped by the txdone path.
 */
static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	u32 length;
	int status;

	/*
	 * Claim the entry: only kick it when it is marked pending and its
	 * previous status has been consumed. test_and_clear makes the
	 * claim atomic against concurrent kicks.
	 */
	if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	/*
	 * USB devices require certain padding at the end of each frame
	 * and urb. Those paddings are not included in skbs. Pass entry
	 * to the driver to determine what the overall length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(entry);

	status = skb_padto(entry->skb, length);
	if (unlikely(status)) {
		/* TODO: report something more appropriate than IO_FAILED. */
		rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);

		return false;
	}

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, length,
			  rt2x00usb_interrupt_txdone, entry);

	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		/* -ENODEV means the device is gone for good. */
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}
|
|
|
|
|
2010-12-13 19:35:17 +08:00
|
|
|
/*
|
|
|
|
* RX data handlers.
|
|
|
|
*/
|
|
|
|
/*
 * RX data handlers.
 */

/*
 * Deferred RX-completion worker (rt2x00dev->rxdone_work).
 *
 * Processes completed RX entries in order from Q_INDEX_DONE and hands
 * each frame to rt2x00lib. Stops at the first entry still owned by the
 * device or without a pending status.
 */
static void rt2x00usb_work_rxdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, rxdone_work);
	struct queue_entry *entry;
	struct skb_frame_desc *skbdesc;
	/* Scratch descriptor buffer; 32 bytes is assumed to cover the
	 * largest desc_size of any supported device — TODO confirm. */
	u8 rxd[32];

	while (!rt2x00queue_empty(rt2x00dev->rx)) {
		entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);

		if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
		    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
			break;

		/*
		 * Fill in desc fields of the skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = rxd;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 * GFP_KERNEL is fine: this runs in process context.
		 */
		rt2x00lib_rxdone(entry, GFP_KERNEL);
	}
}
|
|
|
|
|
|
|
|
/*
 * RX URB completion handler (runs in interrupt context).
 *
 * Releases device ownership of the entry, flags obviously bad frames,
 * and defers all real processing to the rxdone workqueue.
 */
static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	/* Atomically take the entry back from the device; a second
	 * completion for the same entry is ignored. */
	if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Report the frame as DMA done
	 */
	rt2x00lib_dmadone(entry);

	/*
	 * Check if the received data is simply too small
	 * to be actually valid, or if the urb is signaling
	 * a problem.
	 */
	if (urb->actual_length < entry->queue->desc_size || urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

	/*
	 * Schedule the delayed work for reading the RX status
	 * from the device.
	 */
	queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
|
|
|
|
|
2013-03-15 16:57:56 +08:00
|
|
|
/*
 * Hand one RX entry to the hardware as a bulk-in URB.
 *
 * Used as a rt2x00queue_for_each_entry() callback; always returns false
 * so iteration continues. On submission failure the entry is flagged
 * ENTRY_DATA_IO_FAILED and reported DMA-done.
 */
static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	int status;

	/* Atomically give the entry to the device; skip entries already
	 * owned by it or whose previous status is still unprocessed. */
	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	rt2x00lib_dmastart(entry);

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, entry->skb->len,
			  rt2x00usb_interrupt_rxdone, entry);

	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		/* -ENODEV means the device is gone for good. */
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}
|
|
|
|
|
2010-12-13 19:34:54 +08:00
|
|
|
void rt2x00usb_kick_queue(struct data_queue *queue)
|
2008-06-07 04:47:39 +08:00
|
|
|
{
|
2010-12-13 19:34:54 +08:00
|
|
|
switch (queue->qid) {
|
2010-12-13 19:36:38 +08:00
|
|
|
case QID_AC_VO:
|
|
|
|
case QID_AC_VI:
|
2010-12-13 19:34:54 +08:00
|
|
|
case QID_AC_BE:
|
|
|
|
case QID_AC_BK:
|
|
|
|
if (!rt2x00queue_empty(queue))
|
2013-03-15 16:57:56 +08:00
|
|
|
rt2x00queue_for_each_entry(queue,
|
|
|
|
Q_INDEX_DONE,
|
|
|
|
Q_INDEX,
|
|
|
|
NULL,
|
2010-12-13 19:34:54 +08:00
|
|
|
rt2x00usb_kick_tx_entry);
|
|
|
|
break;
|
2010-12-13 19:35:17 +08:00
|
|
|
case QID_RX:
|
|
|
|
if (!rt2x00queue_full(queue))
|
2013-03-15 16:57:56 +08:00
|
|
|
rt2x00queue_for_each_entry(queue,
|
|
|
|
Q_INDEX,
|
|
|
|
Q_INDEX_DONE,
|
|
|
|
NULL,
|
2010-12-13 19:35:17 +08:00
|
|
|
rt2x00usb_kick_rx_entry);
|
|
|
|
break;
|
2010-12-13 19:34:54 +08:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
2008-06-07 04:47:39 +08:00
|
|
|
}
|
2010-12-13 19:34:54 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
|
2008-06-07 04:47:39 +08:00
|
|
|
|
2013-03-15 16:57:56 +08:00
|
|
|
/*
 * Cancel the in-flight URB(s) of a single queue entry.
 *
 * rt2x00queue_for_each_entry() callback; always returns false so the
 * walk continues. Entries not owned by the device have nothing in
 * flight and are skipped.
 */
static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	/* Beacon entries carry a larger priv struct with the guardian urb;
	 * bcn_priv aliases priv_data and is only valid for QID_BEACON. */
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return false;

	usb_kill_urb(entry_priv->urb);

	/*
	 * Kill guardian urb (if required by driver).
	 */
	if ((entry->queue->qid == QID_BEACON) &&
	    (test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags)))
		usb_kill_urb(bcn_priv->guardian_urb);

	return false;
}
|
2009-01-28 07:32:33 +08:00
|
|
|
|
2011-04-18 21:31:02 +08:00
|
|
|
/*
 * Flush a queue: wait for (or, when @drop is set, forcibly cancel) all
 * outstanding entries.
 *
 * Best-effort: polls up to 10 times, kicking the queue's completion
 * worker and sleeping 10ms between polls, so the queue may still be
 * non-empty on return. Must be called from a context that may sleep.
 */
void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
{
	struct work_struct *completion;
	unsigned int i;

	/* When dropping, kill the URBs instead of waiting them out. */
	if (drop)
		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
					   rt2x00usb_flush_entry);

	/*
	 * Obtain the queue completion handler
	 */
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		completion = &queue->rt2x00dev->txdone_work;
		break;
	case QID_RX:
		completion = &queue->rt2x00dev->rxdone_work;
		break;
	default:
		/* No completion worker for other queue types. */
		return;
	}

	for (i = 0; i < 10; i++) {
		/*
		 * Check if the driver is already done, otherwise we
		 * have to sleep a little while to give the driver/hw
		 * the oppurtunity to complete interrupt process itself.
		 */
		if (rt2x00queue_empty(queue))
			break;

		/*
		 * Schedule the completion handler manually, when this
		 * worker function runs, it should cleanup the queue.
		 */
		queue_work(queue->rt2x00dev->workqueue, completion);

		/*
		 * Wait for a little while to give the driver
		 * the oppurtunity to recover itself.
		 */
		msleep(10);
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
|
2009-01-28 07:32:33 +08:00
|
|
|
|
2010-08-31 03:15:19 +08:00
|
|
|
static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
|
2010-07-11 18:25:46 +08:00
|
|
|
{
|
2013-04-19 23:33:40 +08:00
|
|
|
rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced forced reset\n",
|
|
|
|
queue->qid);
|
2010-07-11 18:25:46 +08:00
|
|
|
|
2013-10-06 00:15:33 +08:00
|
|
|
rt2x00queue_stop_queue(queue);
|
2010-12-13 19:35:40 +08:00
|
|
|
rt2x00queue_flush_queue(queue, true);
|
2013-10-06 00:15:33 +08:00
|
|
|
rt2x00queue_start_queue(queue);
|
2010-07-11 18:25:46 +08:00
|
|
|
}
|
|
|
|
|
2011-04-18 21:29:38 +08:00
|
|
|
static int rt2x00usb_dma_timeout(struct data_queue *queue)
|
|
|
|
{
|
|
|
|
struct queue_entry *entry;
|
|
|
|
|
|
|
|
entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
|
|
|
|
return rt2x00queue_dma_timeout(entry);
|
|
|
|
}
|
|
|
|
|
2010-07-11 18:25:46 +08:00
|
|
|
void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
|
|
|
|
{
|
|
|
|
struct data_queue *queue;
|
|
|
|
|
|
|
|
tx_queue_for_each(rt2x00dev, queue) {
|
2010-10-11 21:39:04 +08:00
|
|
|
if (!rt2x00queue_empty(queue)) {
|
2011-04-18 21:29:38 +08:00
|
|
|
if (rt2x00usb_dma_timeout(queue))
|
2010-10-11 21:39:04 +08:00
|
|
|
rt2x00usb_watchdog_tx_dma(queue);
|
|
|
|
}
|
2010-07-11 18:25:46 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
|
|
|
|
|
2007-09-26 08:57:13 +08:00
|
|
|
/*
|
|
|
|
* Radio handlers
|
|
|
|
*/
|
|
|
|
/*
 * Radio handlers
 */

/*
 * Disable the radio by sending the USB_RX_CONTROL vendor request with
 * value 0 (software-only request, no data stage).
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
				    REGISTER_TIMEOUT);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Device initialization handlers.
|
|
|
|
*/
|
2008-11-08 22:25:33 +08:00
|
|
|
void rt2x00usb_clear_entry(struct queue_entry *entry)
|
2008-01-07 06:41:45 +08:00
|
|
|
{
|
2010-08-07 02:45:38 +08:00
|
|
|
entry->flags = 0;
|
|
|
|
|
2010-12-13 19:35:17 +08:00
|
|
|
if (entry->queue->qid == QID_RX)
|
2013-03-15 16:57:56 +08:00
|
|
|
rt2x00usb_kick_rx_entry(entry, NULL);
|
2008-01-07 06:41:45 +08:00
|
|
|
}
|
2008-11-08 22:25:33 +08:00
|
|
|
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
|
2008-01-07 06:41:45 +08:00
|
|
|
|
2008-11-14 06:07:33 +08:00
|
|
|
static void rt2x00usb_assign_endpoint(struct data_queue *queue,
|
|
|
|
struct usb_endpoint_descriptor *ep_desc)
|
|
|
|
{
|
|
|
|
struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
|
|
|
|
int pipe;
|
|
|
|
|
|
|
|
queue->usb_endpoint = usb_endpoint_num(ep_desc);
|
|
|
|
|
|
|
|
if (queue->qid == QID_RX) {
|
|
|
|
pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
|
|
|
|
queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
|
|
|
|
} else {
|
|
|
|
pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
|
|
|
|
queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!queue->usb_maxpacket)
|
|
|
|
queue->usb_maxpacket = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Discover the device's bulk endpoints and assign them to the queues.
 *
 * Returns 0 on success, -EPIPE when no bulk-in (RX) or bulk-out (TX)
 * endpoint could be found.
 */
static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
	struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	struct data_queue *queue = rt2x00dev->tx;
	struct usb_endpoint_descriptor *tx_ep_desc = NULL;
	unsigned int i;

	/*
	 * Walk through all available endpoints to search for "bulk in"
	 * and "bulk out" endpoints. When we find such endpoints collect
	 * the information we need from the descriptor and assign it
	 * to the queue.
	 */
	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc)) {
			rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   (queue != queue_end(rt2x00dev))) {
			/* Hand bulk-out endpoints to TX queues in order;
			 * remember the last one for queues left over. */
			rt2x00usb_assign_endpoint(queue, ep_desc);
			queue = queue_next(queue);

			tx_ep_desc = ep_desc;
		}
	}

	/*
	 * At least 1 endpoint for RX and 1 endpoint for TX must be available.
	 */
	if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
		rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
		return -EPIPE;
	}

	/*
	 * It might be possible not all queues have a dedicated endpoint.
	 * Loop through all TX queues and copy the endpoint information
	 * which we have gathered from already assigned endpoints.
	 */
	txall_queue_for_each(rt2x00dev, queue) {
		if (!queue->usb_endpoint)
			rt2x00usb_assign_endpoint(queue, tx_ep_desc);
	}

	return 0;
}
|
|
|
|
|
2010-10-11 21:37:25 +08:00
|
|
|
/*
 * Allocate the URB (and, for guarded beacon queues, the guardian URB)
 * for every entry of @queue.
 *
 * Returns 0 on success, -ENOMEM on allocation failure. Partially
 * allocated URBs are released later by rt2x00usb_free_entries().
 */
static int rt2x00usb_alloc_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!entry_priv->urb)
			return -ENOMEM;
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
		return 0;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!bcn_priv->guardian_urb)
			return -ENOMEM;
	}

	return 0;
}
|
|
|
|
|
2010-10-11 21:37:25 +08:00
|
|
|
static void rt2x00usb_free_entries(struct data_queue *queue)
|
2007-09-26 08:57:13 +08:00
|
|
|
{
|
2010-10-11 21:37:25 +08:00
|
|
|
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
|
2008-05-10 19:46:03 +08:00
|
|
|
struct queue_entry_priv_usb *entry_priv;
|
|
|
|
struct queue_entry_priv_usb_bcn *bcn_priv;
|
2007-09-26 08:57:13 +08:00
|
|
|
unsigned int i;
|
|
|
|
|
2008-02-06 05:42:23 +08:00
|
|
|
if (!queue->entries)
|
2007-09-26 08:57:13 +08:00
|
|
|
return;
|
|
|
|
|
2008-02-06 05:42:23 +08:00
|
|
|
for (i = 0; i < queue->limit; i++) {
|
2008-05-10 19:46:03 +08:00
|
|
|
entry_priv = queue->entries[i].priv_data;
|
|
|
|
usb_kill_urb(entry_priv->urb);
|
|
|
|
usb_free_urb(entry_priv->urb);
|
2007-09-26 08:57:13 +08:00
|
|
|
}
|
2008-05-10 19:46:03 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If this is not the beacon queue or
|
|
|
|
* no guardian byte was required for the beacon,
|
|
|
|
* then we are done.
|
|
|
|
*/
|
2010-10-11 21:37:25 +08:00
|
|
|
if (queue->qid != QID_BEACON ||
|
2011-04-18 21:27:06 +08:00
|
|
|
!test_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags))
|
2008-05-10 19:46:03 +08:00
|
|
|
return;
|
|
|
|
|
|
|
|
for (i = 0; i < queue->limit; i++) {
|
|
|
|
bcn_priv = queue->entries[i].priv_data;
|
|
|
|
usb_kill_urb(bcn_priv->guardian_urb);
|
|
|
|
usb_free_urb(bcn_priv->guardian_urb);
|
|
|
|
}
|
2007-09-26 08:57:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
|
|
|
|
{
|
2008-02-06 05:42:23 +08:00
|
|
|
struct data_queue *queue;
|
2008-06-17 01:56:08 +08:00
|
|
|
int status;
|
2007-09-26 08:57:13 +08:00
|
|
|
|
2008-11-14 06:07:33 +08:00
|
|
|
/*
|
|
|
|
* Find endpoints for each queue
|
|
|
|
*/
|
|
|
|
status = rt2x00usb_find_endpoints(rt2x00dev);
|
|
|
|
if (status)
|
|
|
|
goto exit;
|
|
|
|
|
2007-09-26 08:57:13 +08:00
|
|
|
/*
|
|
|
|
* Allocate DMA
|
|
|
|
*/
|
2008-02-06 05:42:23 +08:00
|
|
|
queue_for_each(rt2x00dev, queue) {
|
2010-10-11 21:37:25 +08:00
|
|
|
status = rt2x00usb_alloc_entries(queue);
|
2007-09-26 08:57:13 +08:00
|
|
|
if (status)
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
exit:
|
|
|
|
rt2x00usb_uninitialize(rt2x00dev);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);
|
|
|
|
|
|
|
|
void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
|
|
|
|
{
|
2008-02-06 05:42:23 +08:00
|
|
|
struct data_queue *queue;
|
2007-09-26 08:57:13 +08:00
|
|
|
|
2008-02-06 05:42:23 +08:00
|
|
|
queue_for_each(rt2x00dev, queue)
|
2010-10-11 21:37:25 +08:00
|
|
|
rt2x00usb_free_entries(queue);
|
2007-09-26 08:57:13 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* USB driver handlers.
|
|
|
|
*/
|
|
|
|
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
|
|
|
|
{
|
|
|
|
kfree(rt2x00dev->rf);
|
|
|
|
rt2x00dev->rf = NULL;
|
|
|
|
|
|
|
|
kfree(rt2x00dev->eeprom);
|
|
|
|
rt2x00dev->eeprom = NULL;
|
|
|
|
|
2008-02-11 05:49:13 +08:00
|
|
|
kfree(rt2x00dev->csr.cache);
|
|
|
|
rt2x00dev->csr.cache = NULL;
|
2007-09-26 08:57:13 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
|
|
|
|
{
|
2008-02-11 05:49:13 +08:00
|
|
|
rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
|
|
|
|
if (!rt2x00dev->csr.cache)
|
2007-09-26 08:57:13 +08:00
|
|
|
goto exit;
|
|
|
|
|
|
|
|
rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
|
|
|
|
if (!rt2x00dev->eeprom)
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
|
|
|
|
if (!rt2x00dev->rf)
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
exit:
|
2013-04-19 23:33:40 +08:00
|
|
|
rt2x00_probe_err("Failed to allocate registers\n");
|
2007-09-26 08:57:13 +08:00
|
|
|
|
|
|
|
rt2x00usb_free_reg(rt2x00dev);
|
|
|
|
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * USB probe handler: take a reference on the usb device, allocate the
 * mac80211 hardware structure and the register buffers, then hand the
 * device over to the rt2x00 library. The error unwind labels release
 * resources in exact reverse order of acquisition.
 */
int rt2x00usb_probe(struct usb_interface *usb_intf,
		    const struct rt2x00_ops *ops)
{
	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	usb_dev = usb_get_dev(usb_dev);
	/*
	 * Force a reset during probe: without it the device may keep a
	 * broken state across module reload (bogus rx frames/sizes),
	 * which a physical replug would otherwise be needed to clear.
	 */
	usb_reset_device(usb_dev);

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		rt2x00_probe_err("Failed to allocate hardware\n");
		retval = -ENOMEM;
		goto exit_put_device;
	}

	usb_set_intfdata(usb_intf, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &usb_intf->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);

	/* Deferred rx/tx completion handling runs from workqueue context. */
	INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
	INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
	hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);

	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_put_device:
	usb_put_dev(usb_dev);

	usb_set_intfdata(usb_intf, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);
|
|
|
|
|
|
|
|
void rt2x00usb_disconnect(struct usb_interface *usb_intf)
|
|
|
|
{
|
|
|
|
struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
|
|
|
|
struct rt2x00_dev *rt2x00dev = hw->priv;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free all allocated data.
|
|
|
|
*/
|
|
|
|
rt2x00lib_remove_dev(rt2x00dev);
|
|
|
|
rt2x00usb_free_reg(rt2x00dev);
|
|
|
|
ieee80211_free_hw(hw);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free the USB device data.
|
|
|
|
*/
|
|
|
|
usb_set_intfdata(usb_intf, NULL);
|
|
|
|
usb_put_dev(interface_to_usbdev(usb_intf));
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);
|
|
|
|
|
|
|
|
#ifdef CONFIG_PM
/*
 * USB suspend handler: delegate to the generic rt2x00 library suspend.
 */
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_suspend(rt2x00dev, state);
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);
|
|
|
|
|
|
|
|
/*
 * USB resume handler: delegate to the generic rt2x00 library resume.
 */
int rt2x00usb_resume(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */
|
|
|
|
|
|
|
|
/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");