linux/drivers/usb/chipidea/udc.c

/*
* udc.c - ChipIdea UDC driver
*
* Copyright (C) 2008 Chipidea - MIPS Technologies, Inc. All rights reserved.
*
* Author: David Lopo
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg-fsm.h>
#include <linux/usb/chipidea.h>
#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "debug.h"
#include "otg.h"
#include "otg_fsm.h"
/* control endpoint description */
static const struct usb_endpoint_descriptor
ctrl_endpt_out_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
static const struct usb_endpoint_descriptor
ctrl_endpt_in_desc = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
/**
* hw_ep_bit: calculates the bit number
* @num: endpoint number
* @dir: endpoint direction
*
* This function returns bit number
*/
static inline int hw_ep_bit(int num, int dir)
{
return num + (dir ? 16 : 0);
}
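/*
 * ep_to_bit: maps a logical endpoint index to its register bit position.
 * Indices 0..(hw_ep_max/2 - 1) are the OUT (RX) halves and map to bits
 * 0..15; the IN (TX) halves occupy the upper part of the array and map
 * to bits 16 and up, hence the "fill" shift for indices past hw_ep_max/2.
 */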
static inline int ep_to_bit(struct ci_hdrc *ci, int n)
{
int fill = 16 - ci->hw_ep_max / 2;
if (n >= ci->hw_ep_max / 2)
n += fill;
return n;
}
/**
* hw_device_state: enables/disables interrupts (execute without interruption)
* @dma: 0 => disable, !0 => enable and set dma engine
*
* This function returns an error code
*/
static int hw_device_state(struct ci_hdrc *ci, u32 dma)
{
if (dma) {
hw_write(ci, OP_ENDPTLISTADDR, ~0, dma);
/* interrupt, error, port change, reset, sleep/suspend */
hw_write(ci, OP_USBINTR, ~0,
USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
} else {
hw_write(ci, OP_USBINTR, ~0, 0);
hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
}
return 0;
}
/**
* hw_ep_flush: flush endpoint fifo (execute without interruption)
* @num: endpoint number
* @dir: endpoint direction
*
* This function returns an error code
*/
static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
{
int n = hw_ep_bit(num, dir);
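/*
 * Repeat the flush until ENDPTSTAT reports the endpoint idle: a transfer
 * may still be primed on the endpoint when the flush completes, in which
 * case it has to be flushed again.
 */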
do {
/* flush any pending transfer */
hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
cpu_relax();
} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
return 0;
}
/**
* hw_ep_disable: disables endpoint (execute without interruption)
* @num: endpoint number
* @dir: endpoint direction
*
* This function returns an error code
*/
static int hw_ep_disable(struct ci_hdrc *ci, int num, int dir)
{
hw_ep_flush(ci, num, dir);
hw_write(ci, OP_ENDPTCTRL + num,
dir ? ENDPTCTRL_TXE : ENDPTCTRL_RXE, 0);
return 0;
}
/**
* hw_ep_enable: enables endpoint (execute without interruption)
* @num: endpoint number
* @dir: endpoint direction
* @type: endpoint type
*
* This function returns an error code
*/
static int hw_ep_enable(struct ci_hdrc *ci, int num, int dir, int type)
{
u32 mask, data;
if (dir) {
mask = ENDPTCTRL_TXT; /* type */
data = type << __ffs(mask);
mask |= ENDPTCTRL_TXS; /* unstall */
mask |= ENDPTCTRL_TXR; /* reset data toggle */
data |= ENDPTCTRL_TXR;
mask |= ENDPTCTRL_TXE; /* enable */
data |= ENDPTCTRL_TXE;
} else {
mask = ENDPTCTRL_RXT; /* type */
data = type << __ffs(mask);
mask |= ENDPTCTRL_RXS; /* unstall */
mask |= ENDPTCTRL_RXR; /* reset data toggle */
data |= ENDPTCTRL_RXR;
mask |= ENDPTCTRL_RXE; /* enable */
data |= ENDPTCTRL_RXE;
}
hw_write(ci, OP_ENDPTCTRL + num, mask, data);
return 0;
}
/**
* hw_ep_get_halt: return endpoint halt status
* @num: endpoint number
* @dir: endpoint direction
*
* This function returns 1 if endpoint halted
*/
static int hw_ep_get_halt(struct ci_hdrc *ci, int num, int dir)
{
u32 mask = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
return hw_read(ci, OP_ENDPTCTRL + num, mask) ? 1 : 0;
}
/**
* hw_ep_prime: primes endpoint (execute without interruption)
* @num: endpoint number
* @dir: endpoint direction
* @is_ctrl: true if control endpoint
*
* This function returns an error code
*/
static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
{
int n = hw_ep_bit(num, dir);
if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
return -EAGAIN;
hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
cpu_relax();
if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
return -EAGAIN;
/* status should be tested according to the manual, but it doesn't work */
return 0;
}
/**
* hw_ep_set_halt: configures ep halt & resets data toggle after clear (execute
* without interruption)
* @num: endpoint number
* @dir: endpoint direction
* @value: true => stall, false => unstall
*
* This function returns an error code
*/
static int hw_ep_set_halt(struct ci_hdrc *ci, int num, int dir, int value)
{
if (value != 0 && value != 1)
return -EINVAL;
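/*
 * Write the stall (or clear-stall) bit and, when clearing, also reset
 * the data toggle; repeat until the halt status reads back as requested.
 */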
do {
enum ci_hw_regs reg = OP_ENDPTCTRL + num;
u32 mask_xs = dir ? ENDPTCTRL_TXS : ENDPTCTRL_RXS;
u32 mask_xr = dir ? ENDPTCTRL_TXR : ENDPTCTRL_RXR;
/* data toggle - reserved for EP0 but it's in ESS */
hw_write(ci, reg, mask_xs|mask_xr,
value ? mask_xs : mask_xr);
} while (value != hw_ep_get_halt(ci, num, dir));
return 0;
}
/**
* hw_port_is_high_speed: test if port is high speed
*
* This function returns true if high speed port
*/
static int hw_port_is_high_speed(struct ci_hdrc *ci)
{
return ci->hw_bank.lpm ? hw_read(ci, OP_DEVLC, DEVLC_PSPD) :
hw_read(ci, OP_PORTSC, PORTSC_HSP);
}
/**
* hw_test_and_clear_complete: test & clear complete status (execute without
* interruption)
* @n: endpoint number
*
* This function returns complete status
*/
static int hw_test_and_clear_complete(struct ci_hdrc *ci, int n)
{
n = ep_to_bit(ci, n);
return hw_test_and_clear(ci, OP_ENDPTCOMPLETE, BIT(n));
}
/**
* hw_test_and_clear_intr_active: test & clear active interrupts (execute
* without interruption)
*
* This function returns active interrupts
*/
static u32 hw_test_and_clear_intr_active(struct ci_hdrc *ci)
{
u32 reg = hw_read_intr_status(ci) & hw_read_intr_enable(ci);
hw_write(ci, OP_USBSTS, ~0, reg);
return reg;
}
/**
* hw_test_and_clear_setup_guard: test & clear setup guard (execute without
* interruption)
*
* This function returns guard value
*/
static int hw_test_and_clear_setup_guard(struct ci_hdrc *ci)
{
return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, 0);
}
/**
* hw_test_and_set_setup_guard: test & set setup guard (execute without
* interruption)
*
* This function returns guard value
*/
static int hw_test_and_set_setup_guard(struct ci_hdrc *ci)
{
return hw_test_and_write(ci, OP_USBCMD, USBCMD_SUTW, USBCMD_SUTW);
}
/**
* hw_usb_set_address: configures USB address (execute without interruption)
* @value: new USB address
*
* This function explicitly sets the address, without the "USBADRA" (advance)
* feature, which is not supported by older versions of the controller.
*/
static void hw_usb_set_address(struct ci_hdrc *ci, u8 value)
{
hw_write(ci, OP_DEVICEADDR, DEVICEADDR_USBADR,
value << __ffs(DEVICEADDR_USBADR));
}
/**
* hw_usb_reset: restart device after a bus reset (execute without
* interruption)
*
* This function returns an error code
*/
static int hw_usb_reset(struct ci_hdrc *ci)
{
hw_usb_set_address(ci, 0);
/* ESS flushes only at end?!? */
hw_write(ci, OP_ENDPTFLUSH, ~0, ~0);
/* clear setup token semaphores */
hw_write(ci, OP_ENDPTSETUPSTAT, 0, 0);
/* clear complete status */
hw_write(ci, OP_ENDPTCOMPLETE, 0, 0);
/* wait until all bits cleared */
while (hw_read(ci, OP_ENDPTPRIME, ~0))
udelay(10); /* not RTOS friendly */
/* reset all endpoints ? */
/* reset internal status and wait for further instructions
no need to verify the port reset status (ESS does it) */
return 0;
}
/******************************************************************************
* UTIL block
*****************************************************************************/
static int add_td_to_list(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq,
unsigned length)
{
int i;
u32 temp;
struct td_node *lastnode, *node = kzalloc(sizeof(struct td_node),
GFP_ATOMIC);
if (node == NULL)
return -ENOMEM;
node->ptr = dma_pool_alloc(hwep->td_pool, GFP_ATOMIC,
&node->dma);
if (node->ptr == NULL) {
kfree(node);
return -ENOMEM;
}
memset(node->ptr, 0, sizeof(struct ci_hw_td));
node->ptr->token = cpu_to_le32(length << __ffs(TD_TOTAL_BYTES));
node->ptr->token &= cpu_to_le32(TD_TOTAL_BYTES);
node->ptr->token |= cpu_to_le32(TD_STATUS_ACTIVE);
if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX) {
u32 mul = hwreq->req.length / hwep->ep.maxpacket;
if (hwreq->req.length == 0
|| hwreq->req.length % hwep->ep.maxpacket)
mul++;
node->ptr->token |= mul << __ffs(TD_MULTO);
}
temp = (u32) (hwreq->req.dma + hwreq->req.actual);
if (length) {
node->ptr->page[0] = cpu_to_le32(temp);
for (i = 1; i < TD_PAGE_COUNT; i++) {
u32 page = temp + i * CI_HDRC_PAGE_SIZE;
page &= ~TD_RESERVED_MASK;
node->ptr->page[i] = cpu_to_le32(page);
}
}
hwreq->req.actual += length;
if (!list_empty(&hwreq->tds)) {
/* get the last entry */
lastnode = list_entry(hwreq->tds.prev,
struct td_node, td);
lastnode->ptr->next = cpu_to_le32(node->dma);
}
INIT_LIST_HEAD(&node->td);
list_add_tail(&node->td, &hwreq->tds);
return 0;
}
/**
* _usb_addr: calculates endpoint address from direction & number
* @ep: endpoint
*/
static inline u8 _usb_addr(struct ci_hw_ep *ep)
{
return ((ep->dir == TX) ? USB_ENDPOINT_DIR_MASK : 0) | ep->num;
}
/**
* _hardware_enqueue: configures a request at hardware level
* @hwep: endpoint
* @hwreq: request
*
* This function returns an error code
*/
static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
struct ci_hdrc *ci = hwep->ci;
int ret = 0;
unsigned rest = hwreq->req.length;
int pages = TD_PAGE_COUNT;
struct td_node *firstnode, *lastnode;
/* don't queue twice */
if (hwreq->req.status == -EALREADY)
return -EALREADY;
hwreq->req.status = -EALREADY;
ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir);
if (ret)
return ret;
/*
* The first buffer could be not page aligned.
* In that case we have to span into one extra td.
*/
if (hwreq->req.dma % PAGE_SIZE)
pages--;
if (rest == 0)
add_td_to_list(hwep, hwreq, 0);
while (rest > 0) {
unsigned count = min(hwreq->req.length - hwreq->req.actual,
(unsigned)(pages * CI_HDRC_PAGE_SIZE));
add_td_to_list(hwep, hwreq, count);
rest -= count;
}
if (hwreq->req.zero && hwreq->req.length
&& (hwreq->req.length % hwep->ep.maxpacket == 0))
add_td_to_list(hwep, hwreq, 0);
firstnode = list_first_entry(&hwreq->tds, struct td_node, td);
lastnode = list_entry(hwreq->tds.prev,
struct td_node, td);
lastnode->ptr->next = cpu_to_le32(TD_TERMINATE);
if (!hwreq->req.no_interrupt)
lastnode->ptr->token |= cpu_to_le32(TD_IOC);
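/*
 * Make sure the prepared TDs have reached the DMA memory shared with the
 * controller before they are linked into the queue head or an already
 * queued request, as the hardware may fetch them at any point after that.
 */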
wmb();
hwreq->req.actual = 0;
if (!list_empty(&hwep->qh.queue)) {
struct ci_hw_req *hwreqprev;
int n = hw_ep_bit(hwep->num, hwep->dir);
int tmp_stat;
struct td_node *prevlastnode;
u32 next = firstnode->dma & TD_ADDR_MASK;
hwreqprev = list_entry(hwep->qh.queue.prev,
struct ci_hw_req, queue);
prevlastnode = list_entry(hwreqprev->tds.prev,
struct td_node, td);
prevlastnode->ptr->next = cpu_to_le32(next);
wmb();
if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
goto done;
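/*
 * Add-dTD tripwire: set ATDTW, sample ENDPTSTAT, and retry while the
 * controller clears ATDTW (it does so while it is itself updating the
 * endpoint's dTDs), so the sampled status is known to be consistent
 * before deciding whether the endpoint still needs to be primed.
 */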
do {
hw_write(ci, OP_USBCMD, USBCMD_ATDTW, USBCMD_ATDTW);
tmp_stat = hw_read(ci, OP_ENDPTSTAT, BIT(n));
} while (!hw_read(ci, OP_USBCMD, USBCMD_ATDTW));
hw_write(ci, OP_USBCMD, USBCMD_ATDTW, 0);
if (tmp_stat)
goto done;
}
/* QH configuration */
hwep->qh.ptr->td.next = cpu_to_le32(firstnode->dma);
hwep->qh.ptr->td.token &=
cpu_to_le32(~(TD_STATUS_HALTED|TD_STATUS_ACTIVE));
if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == RX) {
u32 mul = hwreq->req.length / hwep->ep.maxpacket;
if (hwreq->req.length == 0
|| hwreq->req.length % hwep->ep.maxpacket)
mul++;
hwep->qh.ptr->cap |= mul << __ffs(QH_MULT);
}
wmb(); /* synchronize before ep prime */
ret = hw_ep_prime(ci, hwep->num, hwep->dir,
hwep->type == USB_ENDPOINT_XFER_CONTROL);
done:
return ret;
}
/*
* free_pending_td: remove a pending request for the endpoint
* @hwep: endpoint
*/
static void free_pending_td(struct ci_hw_ep *hwep)
{
struct td_node *pending = hwep->pending_td;
dma_pool_free(hwep->td_pool, pending->ptr, pending->dma);
hwep->pending_td = NULL;
kfree(pending);
}
/**
* _hardware_dequeue: handles a request at hardware level
* @hwep: endpoint
* @hwreq: request
*
* This function returns an error code
*/
static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
{
u32 tmptoken;
struct td_node *node, *tmpnode;
unsigned remaining_length;
unsigned actual = hwreq->req.length;
if (hwreq->req.status != -EALREADY)
return -EINVAL;
hwreq->req.status = 0;
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
tmptoken = le32_to_cpu(node->ptr->token);
if ((TD_STATUS_ACTIVE & tmptoken) != 0) {
hwreq->req.status = -EALREADY;
return -EBUSY;
}
remaining_length = (tmptoken & TD_TOTAL_BYTES);
remaining_length >>= __ffs(TD_TOTAL_BYTES);
actual -= remaining_length;
hwreq->req.status = tmptoken & TD_STATUS;
if ((TD_STATUS_HALTED & hwreq->req.status)) {
hwreq->req.status = -EPIPE;
break;
} else if ((TD_STATUS_DT_ERR & hwreq->req.status)) {
hwreq->req.status = -EPROTO;
break;
} else if ((TD_STATUS_TR_ERR & hwreq->req.status)) {
hwreq->req.status = -EILSEQ;
break;
}
if (remaining_length) {
if (hwep->dir) {
hwreq->req.status = -EPROTO;
break;
}
}
/*
* As the hardware could still address the freed td
* which would render the udc unusable, the cleanup of the
* td has to be delayed by one.
*/
if (hwep->pending_td)
free_pending_td(hwep);
hwep->pending_td = node;
list_del_init(&node->td);
}
usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir);
hwreq->req.actual += actual;
if (hwreq->req.status)
return hwreq->req.status;
return hwreq->req.actual;
}
/**
* _ep_nuke: dequeues all endpoint requests
* @hwep: endpoint
*
* This function returns an error code
* Caller must hold lock
*/
static int _ep_nuke(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
struct td_node *node, *tmpnode;
if (hwep == NULL)
return -EINVAL;
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
while (!list_empty(&hwep->qh.queue)) {
/* pop oldest request */
struct ci_hw_req *hwreq = list_entry(hwep->qh.queue.next,
struct ci_hw_req, queue);
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
list_del_init(&node->td);
node->ptr = NULL;
kfree(node);
}
list_del_init(&hwreq->queue);
hwreq->req.status = -ESHUTDOWN;
if (hwreq->req.complete != NULL) {
spin_unlock(hwep->lock);
usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
spin_lock(hwep->lock);
}
}
if (hwep->pending_td)
free_pending_td(hwep);
return 0;
}
/**
* _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
* @gadget: gadget
*
* This function returns an error code
*/
static int _gadget_stop_activity(struct usb_gadget *gadget)
{
struct usb_ep *ep;
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
unsigned long flags;
spin_lock_irqsave(&ci->lock, flags);
ci->gadget.speed = USB_SPEED_UNKNOWN;
ci->remote_wakeup = 0;
ci->suspended = 0;
spin_unlock_irqrestore(&ci->lock, flags);
/* flush all endpoints */
gadget_for_each_ep(ep, gadget) {
usb_ep_fifo_flush(ep);
}
usb_ep_fifo_flush(&ci->ep0out->ep);
usb_ep_fifo_flush(&ci->ep0in->ep);
/* make sure to disable all endpoints */
gadget_for_each_ep(ep, gadget) {
usb_ep_disable(ep);
}
if (ci->status != NULL) {
usb_ep_free_request(&ci->ep0in->ep, ci->status);
ci->status = NULL;
}
return 0;
}
/******************************************************************************
* ISR block
*****************************************************************************/
/**
* isr_reset_handler: USB reset interrupt handler
* @ci: UDC device
*
* This function resets USB engine after a bus reset occurred
*/
static void isr_reset_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
int retval;
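/*
 * Drop the lock before calling into the gadget stack: the reset and
 * disconnect callbacks may call back into endpoint operations that take
 * ci->lock again, which would otherwise recurse on the spinlock.
 */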
spin_unlock(&ci->lock);
if (ci->gadget.speed != USB_SPEED_UNKNOWN)
usb_gadget_udc_reset(&ci->gadget, ci->driver);
retval = _gadget_stop_activity(&ci->gadget);
if (retval)
goto done;
retval = hw_usb_reset(ci);
if (retval)
goto done;
ci->status = usb_ep_alloc_request(&ci->ep0in->ep, GFP_ATOMIC);
if (ci->status == NULL)
retval = -ENOMEM;
done:
spin_lock(&ci->lock);
if (retval)
dev_err(ci->dev, "error: %i\n", retval);
}
/**
* isr_get_status_complete: get_status request complete function
* @ep: endpoint
* @req: request handled
*
* Caller must release lock
*/
static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req)
{
if (ep == NULL || req == NULL)
return;
kfree(req->buf);
usb_ep_free_request(ep, req);
}
/**
* _ep_queue: queues (submits) an I/O request to an endpoint
*
* Caller must hold lock
*/
static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t __maybe_unused gfp_flags)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
struct ci_hdrc *ci = hwep->ci;
int retval = 0;
if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
return -EINVAL;
if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
if (req->length)
hwep = (ci->ep0_dir == RX) ?
ci->ep0out : ci->ep0in;
if (!list_empty(&hwep->qh.queue)) {
_ep_nuke(hwep);
retval = -EOVERFLOW;
dev_warn(hwep->ci->dev, "endpoint ctrl %X nuked\n",
_usb_addr(hwep));
}
}
if (usb_endpoint_xfer_isoc(hwep->ep.desc) &&
hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) {
dev_err(hwep->ci->dev, "request length too big for isochronous\n");
return -EMSGSIZE;
}
/* first nuke then test link, e.g. previous status has not sent */
if (!list_empty(&hwreq->queue)) {
dev_err(hwep->ci->dev, "request already in queue\n");
return -EBUSY;
}
/* push request */
hwreq->req.status = -EINPROGRESS;
hwreq->req.actual = 0;
retval = _hardware_enqueue(hwep, hwreq);
if (retval == -EALREADY)
retval = 0;
if (!retval)
list_add_tail(&hwreq->queue, &hwep->qh.queue);
return retval;
}
/**
* isr_get_status_response: get_status request response
* @ci: ci struct
* @setup: setup request packet
*
* This function returns an error code
*/
static int isr_get_status_response(struct ci_hdrc *ci,
struct usb_ctrlrequest *setup)
__releases(hwep->lock)
__acquires(hwep->lock)
{
struct ci_hw_ep *hwep = ci->ep0in;
struct usb_request *req = NULL;
gfp_t gfp_flags = GFP_ATOMIC;
int dir, num, retval;
if (hwep == NULL || setup == NULL)
return -EINVAL;
spin_unlock(hwep->lock);
req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
spin_lock(hwep->lock);
if (req == NULL)
return -ENOMEM;
req->complete = isr_get_status_complete;
req->length = 2;
req->buf = kzalloc(req->length, gfp_flags);
if (req->buf == NULL) {
retval = -ENOMEM;
goto err_free_req;
}
if ((setup->bRequestType & USB_RECIP_MASK) == USB_RECIP_DEVICE) {
*(u16 *)req->buf = (ci->remote_wakeup << 1) |
ci->gadget.is_selfpowered;
} else if ((setup->bRequestType & USB_RECIP_MASK) \
== USB_RECIP_ENDPOINT) {
dir = (le16_to_cpu(setup->wIndex) & USB_ENDPOINT_DIR_MASK) ?
TX : RX;
num = le16_to_cpu(setup->wIndex) & USB_ENDPOINT_NUMBER_MASK;
*(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
}
/* else do nothing; reserved for future use */
retval = _ep_queue(&hwep->ep, req, gfp_flags);
if (retval)
goto err_free_buf;
return 0;
err_free_buf:
kfree(req->buf);
err_free_req:
spin_unlock(hwep->lock);
usb_ep_free_request(&hwep->ep, req);
spin_lock(hwep->lock);
return retval;
}
/**
* isr_setup_status_complete: setup_status request complete function
* @ep: endpoint
* @req: request handled
*
* Caller must release lock. Put the port in test mode if test mode
* feature is selected.
*/
static void
isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req)
{
struct ci_hdrc *ci = req->context;
unsigned long flags;
if (ci->setaddr) {
hw_usb_set_address(ci, ci->address);
ci->setaddr = false;
if (ci->address)
usb_gadget_set_state(&ci->gadget, USB_STATE_ADDRESS);
}
spin_lock_irqsave(&ci->lock, flags);
if (ci->test_mode)
hw_port_test_set(ci, ci->test_mode);
spin_unlock_irqrestore(&ci->lock, flags);
}
/**
* isr_setup_status_phase: queues the status phase of a setup transaction
* @ci: ci struct
*
* This function returns an error code
*/
static int isr_setup_status_phase(struct ci_hdrc *ci)
{
int retval;
struct ci_hw_ep *hwep;
hwep = (ci->ep0_dir == TX) ? ci->ep0out : ci->ep0in;
ci->status->context = ci;
ci->status->complete = isr_setup_status_complete;
retval = _ep_queue(&hwep->ep, ci->status, GFP_ATOMIC);
return retval;
}
/**
* isr_tr_complete_low: transaction complete low level handler
* @hwep: endpoint
*
* This function returns an error code
* Caller must hold lock
*/
static int isr_tr_complete_low(struct ci_hw_ep *hwep)
__releases(hwep->lock)
__acquires(hwep->lock)
{
struct ci_hw_req *hwreq, *hwreqtemp;
struct ci_hw_ep *hweptemp = hwep;
int retval = 0;
list_for_each_entry_safe(hwreq, hwreqtemp, &hwep->qh.queue,
queue) {
retval = _hardware_dequeue(hwep, hwreq);
if (retval < 0)
break;
list_del_init(&hwreq->queue);
if (hwreq->req.complete != NULL) {
spin_unlock(hwep->lock);
if ((hwep->type == USB_ENDPOINT_XFER_CONTROL) &&
hwreq->req.length)
hweptemp = hwep->ci->ep0in;
usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
spin_lock(hwep->lock);
}
}
if (retval == -EBUSY)
retval = 0;
return retval;
}
/**
* isr_setup_packet_handler: setup packet handler
* @ci: UDC descriptor
*
* This function handles setup packet
*/
static void isr_setup_packet_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
struct ci_hw_ep *hwep = &ci->ci_hw_ep[0];
struct usb_ctrlrequest req;
int type, num, dir, err = -EINVAL;
u8 tmode = 0;
/*
* Flush data and handshake transactions of previous
* setup packet.
*/
_ep_nuke(ci->ep0out);
_ep_nuke(ci->ep0in);
/* read_setup_packet */
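/*
 * Setup tripwire: set the SUTW guard, copy the 8-byte setup packet out
 * of the queue head, and retry if the controller cleared the guard,
 * i.e. a new setup packet arrived while the copy was in progress.
 */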
do {
hw_test_and_set_setup_guard(ci);
memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
} while (!hw_test_and_clear_setup_guard(ci));
type = req.bRequestType;
ci->ep0_dir = (type & USB_DIR_IN) ? TX : RX;
switch (req.bRequest) {
case USB_REQ_CLEAR_FEATURE:
if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
le16_to_cpu(req.wValue) ==
USB_ENDPOINT_HALT) {
if (req.wLength != 0)
break;
num = le16_to_cpu(req.wIndex);
dir = num & USB_ENDPOINT_DIR_MASK;
num &= USB_ENDPOINT_NUMBER_MASK;
if (dir) /* TX */
num += ci->hw_ep_max / 2;
if (!ci->ci_hw_ep[num].wedge) {
spin_unlock(&ci->lock);
err = usb_ep_clear_halt(
&ci->ci_hw_ep[num].ep);
spin_lock(&ci->lock);
if (err)
break;
}
err = isr_setup_status_phase(ci);
} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE) &&
le16_to_cpu(req.wValue) ==
USB_DEVICE_REMOTE_WAKEUP) {
if (req.wLength != 0)
break;
ci->remote_wakeup = 0;
err = isr_setup_status_phase(ci);
} else {
goto delegate;
}
break;
case USB_REQ_GET_STATUS:
if (type != (USB_DIR_IN|USB_RECIP_DEVICE) &&
type != (USB_DIR_IN|USB_RECIP_ENDPOINT) &&
type != (USB_DIR_IN|USB_RECIP_INTERFACE))
goto delegate;
if (le16_to_cpu(req.wLength) != 2 ||
le16_to_cpu(req.wValue) != 0)
break;
err = isr_get_status_response(ci, &req);
break;
case USB_REQ_SET_ADDRESS:
if (type != (USB_DIR_OUT|USB_RECIP_DEVICE))
goto delegate;
if (le16_to_cpu(req.wLength) != 0 ||
le16_to_cpu(req.wIndex) != 0)
break;
ci->address = (u8)le16_to_cpu(req.wValue);
ci->setaddr = true;
err = isr_setup_status_phase(ci);
break;
case USB_REQ_SET_FEATURE:
if (type == (USB_DIR_OUT|USB_RECIP_ENDPOINT) &&
le16_to_cpu(req.wValue) ==
USB_ENDPOINT_HALT) {
if (req.wLength != 0)
break;
num = le16_to_cpu(req.wIndex);
dir = num & USB_ENDPOINT_DIR_MASK;
num &= USB_ENDPOINT_NUMBER_MASK;
if (dir) /* TX */
num += ci->hw_ep_max / 2;
spin_unlock(&ci->lock);
err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
spin_lock(&ci->lock);
if (!err)
isr_setup_status_phase(ci);
} else if (type == (USB_DIR_OUT|USB_RECIP_DEVICE)) {
if (req.wLength != 0)
break;
switch (le16_to_cpu(req.wValue)) {
case USB_DEVICE_REMOTE_WAKEUP:
ci->remote_wakeup = 1;
err = isr_setup_status_phase(ci);
break;
case USB_DEVICE_TEST_MODE:
tmode = le16_to_cpu(req.wIndex) >> 8;
switch (tmode) {
case TEST_J:
case TEST_K:
case TEST_SE0_NAK:
case TEST_PACKET:
case TEST_FORCE_EN:
ci->test_mode = tmode;
err = isr_setup_status_phase(
ci);
break;
default:
break;
}
break;
case USB_DEVICE_B_HNP_ENABLE:
if (ci_otg_is_fsm_mode(ci)) {
ci->gadget.b_hnp_enable = 1;
err = isr_setup_status_phase(
ci);
}
break;
default:
goto delegate;
}
} else {
goto delegate;
}
break;
default:
delegate:
if (req.wLength == 0) /* no data phase */
ci->ep0_dir = TX;
spin_unlock(&ci->lock);
err = ci->driver->setup(&ci->gadget, &req);
spin_lock(&ci->lock);
break;
}
if (err < 0) {
spin_unlock(&ci->lock);
if (usb_ep_set_halt(&hwep->ep))
dev_err(ci->dev, "error: ep_set_halt\n");
spin_lock(&ci->lock);
}
}
/**
* isr_tr_complete_handler: transaction complete interrupt handler
* @ci: UDC descriptor
*
* This function handles traffic events
*/
static void isr_tr_complete_handler(struct ci_hdrc *ci)
__releases(ci->lock)
__acquires(ci->lock)
{
unsigned i;
int err;
for (i = 0; i < ci->hw_ep_max; i++) {
struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
if (hwep->ep.desc == NULL)
continue; /* not configured */
if (hw_test_and_clear_complete(ci, i)) {
err = isr_tr_complete_low(hwep);
if (hwep->type == USB_ENDPOINT_XFER_CONTROL) {
if (err > 0) /* needs status phase */
err = isr_setup_status_phase(ci);
if (err < 0) {
spin_unlock(&ci->lock);
if (usb_ep_set_halt(&hwep->ep))
dev_err(ci->dev,
"error: ep_set_halt\n");
spin_lock(&ci->lock);
}
}
}
/* Only handle setup packet below */
if (i == 0 &&
hw_test_and_clear(ci, OP_ENDPTSETUPSTAT, BIT(0)))
isr_setup_packet_handler(ci);
}
}
/******************************************************************************
* ENDPT block
*****************************************************************************/
/**
* ep_enable: configure endpoint, making it usable
*
* Check usb_ep_enable() at "usb_gadget.h" for details
*/
static int ep_enable(struct usb_ep *ep,
const struct usb_endpoint_descriptor *desc)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
int retval = 0;
unsigned long flags;
u32 cap = 0;
if (ep == NULL || desc == NULL)
return -EINVAL;
spin_lock_irqsave(hwep->lock, flags);
/* only internal SW should enable ctrl endpts */
hwep->ep.desc = desc;
if (!list_empty(&hwep->qh.queue))
dev_warn(hwep->ci->dev, "enabling a non-empty endpoint!\n");
hwep->dir = usb_endpoint_dir_in(desc) ? TX : RX;
hwep->num = usb_endpoint_num(desc);
hwep->type = usb_endpoint_type(desc);
hwep->ep.maxpacket = usb_endpoint_maxp(desc) & 0x07ff;
hwep->ep.mult = QH_ISO_MULT(usb_endpoint_maxp(desc));
if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
cap |= QH_IOS;
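/*
 * Setting ZLT disables automatic zero-length-packet generation by the
 * controller; ZLPs are instead added in software (see the req->zero
 * handling in _hardware_enqueue), which avoids spurious ZLPs on ep0.
 */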
cap |= QH_ZLT;
cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
/*
* For ISO-TX, we set mult at QH as the largest value, and use
* MultO at TD as real mult value.
*/
if (hwep->type == USB_ENDPOINT_XFER_ISOC && hwep->dir == TX)
cap |= 3 << __ffs(QH_MULT);
hwep->qh.ptr->cap = cpu_to_le32(cap);
hwep->qh.ptr->td.next |= cpu_to_le32(TD_TERMINATE); /* needed? */
if (hwep->num != 0 && hwep->type == USB_ENDPOINT_XFER_CONTROL) {
dev_err(hwep->ci->dev, "Set control xfer at non-ep0\n");
retval = -EINVAL;
}
/*
* Enable endpoints in the HW other than ep0 as ep0
* is always enabled
*/
if (hwep->num)
retval |= hw_ep_enable(hwep->ci, hwep->num, hwep->dir,
hwep->type);
spin_unlock_irqrestore(hwep->lock, flags);
return retval;
}
/**
* ep_disable: endpoint is no longer usable
*
* Check usb_ep_disable() at "usb_gadget.h" for details
*/
static int ep_disable(struct usb_ep *ep)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
int direction, retval = 0;
unsigned long flags;
if (ep == NULL)
return -EINVAL;
else if (hwep->ep.desc == NULL)
return -EBUSY;
spin_lock_irqsave(hwep->lock, flags);
/* only internal SW should disable ctrl endpts */
direction = hwep->dir;
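/* control endpoints exist in both directions, so disable each half */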
do {
retval |= _ep_nuke(hwep);
retval |= hw_ep_disable(hwep->ci, hwep->num, hwep->dir);
if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
hwep->dir = (hwep->dir == TX) ? RX : TX;
} while (hwep->dir != direction);
hwep->ep.desc = NULL;
spin_unlock_irqrestore(hwep->lock, flags);
return retval;
}
/**
* ep_alloc_request: allocate a request object to use with this endpoint
*
* Check usb_ep_alloc_request() at "usb_gadget.h" for details
*/
static struct usb_request *ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
{
struct ci_hw_req *hwreq = NULL;
if (ep == NULL)
return NULL;
hwreq = kzalloc(sizeof(struct ci_hw_req), gfp_flags);
if (hwreq != NULL) {
INIT_LIST_HEAD(&hwreq->queue);
INIT_LIST_HEAD(&hwreq->tds);
}
return (hwreq == NULL) ? NULL : &hwreq->req;
}
/**
* ep_free_request: frees a request object
*
* Check usb_ep_free_request() at "usb_gadget.h" for details
*/
static void ep_free_request(struct usb_ep *ep, struct usb_request *req)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
struct td_node *node, *tmpnode;
unsigned long flags;
if (ep == NULL || req == NULL) {
return;
} else if (!list_empty(&hwreq->queue)) {
dev_err(hwep->ci->dev, "freeing queued request\n");
return;
}
spin_lock_irqsave(hwep->lock, flags);
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
list_del_init(&node->td);
node->ptr = NULL;
kfree(node);
}
kfree(hwreq);
spin_unlock_irqrestore(hwep->lock, flags);
}
/**
* ep_queue: queues (submits) an I/O request to an endpoint
*
* Check usb_ep_queue() at "usb_gadget.h" for details
*/
static int ep_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t __maybe_unused gfp_flags)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
int retval = 0;
unsigned long flags;
if (ep == NULL || req == NULL || hwep->ep.desc == NULL)
return -EINVAL;
spin_lock_irqsave(hwep->lock, flags);
retval = _ep_queue(ep, req, gfp_flags);
spin_unlock_irqrestore(hwep->lock, flags);
return retval;
}
/**
* ep_dequeue: dequeues (cancels, unlinks) an I/O request from an endpoint
*
* Check usb_ep_dequeue() at "usb_gadget.h" for details
*/
static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
unsigned long flags;
struct td_node *node, *tmpnode;
if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
list_empty(&hwep->qh.queue))
return -EINVAL;
spin_lock_irqsave(hwep->lock, flags);
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
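/*
 * Free the unfinished TDs of this request now; otherwise a stale,
 * still-active TD would be found on the list if the request is queued
 * again, and its completion would never be reported.
 */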
list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
dma_pool_free(hwep->td_pool, node->ptr, node->dma);
list_del(&node->td);
kfree(node);
}
/* pop request */
list_del_init(&hwreq->queue);
usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);
req->status = -ECONNRESET;
if (hwreq->req.complete != NULL) {
spin_unlock(hwep->lock);
usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
spin_lock(hwep->lock);
}
spin_unlock_irqrestore(hwep->lock, flags);
return 0;
}
/**
* ep_set_halt: sets the endpoint halt feature
*
* Check usb_ep_set_halt() at "usb_gadget.h" for details
*/
static int ep_set_halt(struct usb_ep *ep, int value)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
int direction, retval = 0;
unsigned long flags;
if (ep == NULL || hwep->ep.desc == NULL)
return -EINVAL;
if (usb_endpoint_xfer_isoc(hwep->ep.desc))
return -EOPNOTSUPP;
spin_lock_irqsave(hwep->lock, flags);
#ifndef STALL_IN
/* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
!list_empty(&hwep->qh.queue)) {
spin_unlock_irqrestore(hwep->lock, flags);
return -EAGAIN;
}
#endif
direction = hwep->dir;
do {
retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
if (!value)
hwep->wedge = 0;
if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
hwep->dir = (hwep->dir == TX) ? RX : TX;
} while (hwep->dir != direction);
spin_unlock_irqrestore(hwep->lock, flags);
return retval;
}
/**
* ep_set_wedge: sets the halt feature and ignores clear requests
*
* Check usb_ep_set_wedge() at "usb_gadget.h" for details
*/
static int ep_set_wedge(struct usb_ep *ep)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
unsigned long flags;
if (ep == NULL || hwep->ep.desc == NULL)
return -EINVAL;
spin_lock_irqsave(hwep->lock, flags);
hwep->wedge = 1;
spin_unlock_irqrestore(hwep->lock, flags);
return usb_ep_set_halt(ep);
}
/**
* ep_fifo_flush: flushes contents of a fifo
*
* Check usb_ep_fifo_flush() at "usb_gadget.h" for details
*/
static void ep_fifo_flush(struct usb_ep *ep)
{
struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
unsigned long flags;
if (ep == NULL) {
dev_err(hwep->ci->dev, "%02X: -EINVAL\n", _usb_addr(hwep));
return;
}
spin_lock_irqsave(hwep->lock, flags);
hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
spin_unlock_irqrestore(hwep->lock, flags);
}
/**
* Endpoint-specific part of the API to the USB controller hardware
* Check "usb_gadget.h" for details
*/
static const struct usb_ep_ops usb_ep_ops = {
.enable = ep_enable,
.disable = ep_disable,
.alloc_request = ep_alloc_request,
.free_request = ep_free_request,
.queue = ep_queue,
.dequeue = ep_dequeue,
.set_halt = ep_set_halt,
.set_wedge = ep_set_wedge,
.fifo_flush = ep_fifo_flush,
};
/******************************************************************************
* GADGET block
*****************************************************************************/
static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
unsigned long flags;
int gadget_ready = 0;
spin_lock_irqsave(&ci->lock, flags);
ci->vbus_active = is_active;
if (ci->driver)
gadget_ready = 1;
spin_unlock_irqrestore(&ci->lock, flags);
if (gadget_ready) {
if (is_active) {
pm_runtime_get_sync(&_gadget->dev);
hw_device_reset(ci);
hw_device_state(ci, ci->ep0out->qh.dma);
usb_gadget_set_state(_gadget, USB_STATE_POWERED);
} else {
if (ci->driver)
ci->driver->disconnect(&ci->gadget);
hw_device_state(ci, 0);
if (ci->platdata->notify_event)
ci->platdata->notify_event(ci,
CI_HDRC_CONTROLLER_STOPPED_EVENT);
_gadget_stop_activity(&ci->gadget);
pm_runtime_put_sync(&_gadget->dev);
usb_gadget_set_state(_gadget, USB_STATE_NOTATTACHED);
}
}
return 0;
}
static int ci_udc_wakeup(struct usb_gadget *_gadget)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ci->lock, flags);
if (!ci->remote_wakeup) {
ret = -EOPNOTSUPP;
goto out;
}
if (!hw_read(ci, OP_PORTSC, PORTSC_SUSP)) {
ret = -EINVAL;
goto out;
}
hw_write(ci, OP_PORTSC, PORTSC_FPR, PORTSC_FPR);
out:
spin_unlock_irqrestore(&ci->lock, flags);
return ret;
}
static int ci_udc_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
if (ci->usb_phy)
return usb_phy_set_power(ci->usb_phy, ma);
return -ENOTSUPP;
}
static int ci_udc_selfpowered(struct usb_gadget *_gadget, int is_on)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
struct ci_hw_ep *hwep = ci->ep0in;
unsigned long flags;
spin_lock_irqsave(hwep->lock, flags);
_gadget->is_selfpowered = (is_on != 0);
spin_unlock_irqrestore(hwep->lock, flags);
return 0;
}
/* Change Data+ pullup status
* this func is used by usb_gadget_connect/disconnect
*/
static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
if (!ci->vbus_active)
return -EOPNOTSUPP;
if (is_on)
hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
else
hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
return 0;
}
static int ci_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver);
static int ci_udc_stop(struct usb_gadget *gadget);
/**
* Device operations part of the API to the USB controller hardware,
* which don't involve endpoints (or i/o)
* Check "usb_gadget.h" for details
*/
static const struct usb_gadget_ops usb_gadget_ops = {
.vbus_session = ci_udc_vbus_session,
.wakeup = ci_udc_wakeup,
.set_selfpowered = ci_udc_selfpowered,
.pullup = ci_udc_pullup,
.vbus_draw = ci_udc_vbus_draw,
.udc_start = ci_udc_start,
.udc_stop = ci_udc_stop,
};
static int init_eps(struct ci_hdrc *ci)
{
int retval = 0, i, j;
for (i = 0; i < ci->hw_ep_max/2; i++)
for (j = RX; j <= TX; j++) {
int k = i + j * ci->hw_ep_max/2;
struct ci_hw_ep *hwep = &ci->ci_hw_ep[k];
scnprintf(hwep->name, sizeof(hwep->name), "ep%i%s", i,
(j == TX) ? "in" : "out");
hwep->ci = ci;
hwep->lock = &ci->lock;
hwep->td_pool = ci->td_pool;
hwep->ep.name = hwep->name;
hwep->ep.ops = &usb_ep_ops;
/*
* for ep0: maxP defined in desc, for other
* eps, maxP is set by epautoconfig() called
* by gadget layer
*/
usb_ep_set_maxpacket_limit(&hwep->ep, (unsigned short)~0);
INIT_LIST_HEAD(&hwep->qh.queue);
hwep->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL,
&hwep->qh.dma);
if (hwep->qh.ptr == NULL)
retval = -ENOMEM;
else
memset(hwep->qh.ptr, 0, sizeof(*hwep->qh.ptr));
/*
* set up shorthands for ep0 out and in endpoints,
* don't add to gadget's ep_list
*/
if (i == 0) {
if (j == RX)
ci->ep0out = hwep;
else
ci->ep0in = hwep;
usb_ep_set_maxpacket_limit(&hwep->ep, CTRL_PAYLOAD_MAX);
continue;
}
list_add_tail(&hwep->ep.ep_list, &ci->gadget.ep_list);
}
return retval;
}
static void destroy_eps(struct ci_hdrc *ci)
{
int i;
for (i = 0; i < ci->hw_ep_max; i++) {
struct ci_hw_ep *hwep = &ci->ci_hw_ep[i];
if (hwep->pending_td)
free_pending_td(hwep);
dma_pool_free(ci->qh_pool, hwep->qh.ptr, hwep->qh.dma);
}
}
/**
* ci_udc_start: register a gadget driver
* @gadget: our gadget
* @driver: the driver being registered
*
* Interrupts are enabled here.
*/
static int ci_udc_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
unsigned long flags;
int retval = -ENOMEM;
if (driver->disconnect == NULL)
return -EINVAL;
ci->ep0out->ep.desc = &ctrl_endpt_out_desc;
retval = usb_ep_enable(&ci->ep0out->ep);
if (retval)
return retval;
ci->ep0in->ep.desc = &ctrl_endpt_in_desc;
retval = usb_ep_enable(&ci->ep0in->ep);
if (retval)
return retval;
ci->driver = driver;
/* Start otg fsm for B-device */
if (ci_otg_is_fsm_mode(ci) && ci->fsm.id) {
ci_hdrc_otg_fsm_start(ci);
return retval;
}
pm_runtime_get_sync(&ci->gadget.dev);
if (ci->vbus_active) {
spin_lock_irqsave(&ci->lock, flags);
hw_device_reset(ci);
} else {
pm_runtime_put_sync(&ci->gadget.dev);
return retval;
}
retval = hw_device_state(ci, ci->ep0out->qh.dma);
spin_unlock_irqrestore(&ci->lock, flags);
if (retval)
pm_runtime_put_sync(&ci->gadget.dev);
return retval;
}
/**
 * ci_udc_stop: unregister a gadget driver
 * @gadget: our gadget
 */
static int ci_udc_stop(struct usb_gadget *gadget)
{
struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
unsigned long flags;
spin_lock_irqsave(&ci->lock, flags);
if (ci->vbus_active) {
hw_device_state(ci, 0);
if (ci->platdata->notify_event)
ci->platdata->notify_event(ci,
CI_HDRC_CONTROLLER_STOPPED_EVENT);
spin_unlock_irqrestore(&ci->lock, flags);
_gadget_stop_activity(&ci->gadget);
spin_lock_irqsave(&ci->lock, flags);
pm_runtime_put(&ci->gadget.dev);
}
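	/*
	 * Clear ci->driver unconditionally so a later VBUS connect does not
	 * try to enumerate with a gadget driver that is no longer loaded.
	 */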
ci->driver = NULL;
spin_unlock_irqrestore(&ci->lock, flags);
return 0;
}
/******************************************************************************
* BUS block
*****************************************************************************/
/**
 * udc_irq: ci interrupt handler
 * @ci: chipidea controller
 *
 * Returns IRQ_HANDLED if the IRQ has been handled, IRQ_NONE otherwise.
 * Register access is serialized with ci->lock.
 */
static irqreturn_t udc_irq(struct ci_hdrc *ci)
{
irqreturn_t retval;
u32 intr;
if (ci == NULL)
return IRQ_HANDLED;
spin_lock(&ci->lock);
if (ci->platdata->flags & CI_HDRC_REGS_SHARED) {
if (hw_read(ci, OP_USBMODE, USBMODE_CM) !=
USBMODE_CM_DC) {
spin_unlock(&ci->lock);
return IRQ_NONE;
}
}
intr = hw_test_and_clear_intr_active(ci);
if (intr) {
/* order defines priority - do NOT change it */
if (USBi_URI & intr)
isr_reset_handler(ci);
if (USBi_PCI & intr) {
ci->gadget.speed = hw_port_is_high_speed(ci) ?
USB_SPEED_HIGH : USB_SPEED_FULL;
if (ci->suspended && ci->driver->resume) {
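				/*
				 * Call the gadget driver without holding
				 * ci->lock; the callback may call back into
				 * the UDC (e.g. to queue a request).
				 */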
spin_unlock(&ci->lock);
ci->driver->resume(&ci->gadget);
spin_lock(&ci->lock);
ci->suspended = 0;
}
}
if (USBi_UI & intr)
isr_tr_complete_handler(ci);
if (USBi_SLI & intr) {
if (ci->gadget.speed != USB_SPEED_UNKNOWN &&
ci->driver->suspend) {
ci->suspended = 1;
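				/* As above, drop ci->lock around the callback. */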
spin_unlock(&ci->lock);
ci->driver->suspend(&ci->gadget);
usb_gadget_set_state(&ci->gadget,
USB_STATE_SUSPENDED);
spin_lock(&ci->lock);
}
}
retval = IRQ_HANDLED;
} else {
retval = IRQ_NONE;
}
spin_unlock(&ci->lock);
return retval;
}
/**
* udc_start: initialize gadget role
* @ci: chipidea controller
*/
static int udc_start(struct ci_hdrc *ci)
{
struct device *dev = ci->dev;
int retval = 0;
spin_lock_init(&ci->lock);
ci->gadget.ops = &usb_gadget_ops;
ci->gadget.speed = USB_SPEED_UNKNOWN;
ci->gadget.max_speed = USB_SPEED_HIGH;
ci->gadget.is_otg = ci->is_otg ? 1 : 0;
ci->gadget.name = ci->platdata->name;
INIT_LIST_HEAD(&ci->gadget.ep_list);
/* alloc resources */
ci->qh_pool = dma_pool_create("ci_hw_qh", dev,
sizeof(struct ci_hw_qh),
64, CI_HDRC_PAGE_SIZE);
if (ci->qh_pool == NULL)
return -ENOMEM;
ci->td_pool = dma_pool_create("ci_hw_td", dev,
sizeof(struct ci_hw_td),
64, CI_HDRC_PAGE_SIZE);
if (ci->td_pool == NULL) {
retval = -ENOMEM;
goto free_qh_pool;
}
retval = init_eps(ci);
if (retval)
goto free_pools;
ci->gadget.ep0 = &ci->ep0in->ep;
retval = usb_add_gadget_udc(dev, &ci->gadget);
if (retval)
goto destroy_eps;
pm_runtime_no_callbacks(&ci->gadget.dev);
pm_runtime_enable(&ci->gadget.dev);
return retval;
destroy_eps:
destroy_eps(ci);
free_pools:
dma_pool_destroy(ci->td_pool);
free_qh_pool:
dma_pool_destroy(ci->qh_pool);
return retval;
}
/**
 * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
 * @ci: the controller
 *
 * Called with no interrupts active; the IRQ has already been released.
 */
void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
{
if (!ci->roles[CI_ROLE_GADGET])
return;
usb_del_gadget_udc(&ci->gadget);
destroy_eps(ci);
dma_pool_destroy(ci->td_pool);
dma_pool_destroy(ci->qh_pool);
}
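/*
 * Role-switch "start" hook: enable the BSV (B-session valid) interrupt
 * when the controller switches to device mode.
 */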
static int udc_id_switch_for_device(struct ci_hdrc *ci)
{
if (ci->is_otg)
/* Clear and enable BSV irq */
hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
OTGSC_BSVIS | OTGSC_BSVIE);
return 0;
}
static void udc_id_switch_for_host(struct ci_hdrc *ci)
{
/*
	 * The host role doesn't care about the B_SESSION_VALID event,
	 * so clear and disable the BSV irq.
*/
if (ci->is_otg)
hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
}
/**
 * ci_hdrc_gadget_init - initialize device-related bits
 * @ci: the controller
*
* This function initializes the gadget, if the device is "device capable".
*/
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{
struct ci_role_driver *rdrv;
if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
return -ENXIO;
rdrv = devm_kzalloc(ci->dev, sizeof(struct ci_role_driver), GFP_KERNEL);
if (!rdrv)
return -ENOMEM;
rdrv->start = udc_id_switch_for_device;
rdrv->stop = udc_id_switch_for_host;
rdrv->irq = udc_irq;
rdrv->name = "gadget";
ci->roles[CI_ROLE_GADGET] = rdrv;
return udc_start(ci);
}