Merge tag 'for-usb-next-2012-09-13' of git://git.kernel.org/pub/scm/linux/kernel/git/sarah/xhci into usb-next

xHCI patches for 3.7

Hi Greg,

Here are seven patches for 3.7. The first four fix an issue with Set Address
command timeouts. It turns out that Set Address timeouts can trigger a warning
that was put in to avoid a NULL pointer dereference. This patchset fixes the
underlying cause of the NULL pointer that was papered over by the warning.
They should be applied to stable, but I'm a bit nervous about the size, so I'd
rather they go into 3.7 than try to stuff them into a late 3.6-rc.

The other three patches are various trivial fixes.

Sarah Sharp
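In outline, the series makes each command submission remember the TRB it queued, wait on it with a bounded timeout, and explicitly cancel the command when the wait expires (turning the stuck TRB into a no-op and restarting the command ring) instead of just warning and bailing out. A rough sketch of that pattern in C, using the helpers the diff below introduces; sketch_enable_slot() is a made-up illustrative wrapper, not an upstream function, and error reporting is trimmed:

	/* Illustrative only: a condensed version of the enable-slot path after
	 * this series.  xhci_cancel_cmd() and XHCI_CMD_DEFAULT_TIMEOUT come
	 * from the patches below. */
	static int sketch_enable_slot(struct xhci_hcd *xhci)
	{
		union xhci_trb *cmd_trb;
		unsigned long flags;
		int timeleft;
		int ret;

		spin_lock_irqsave(&xhci->lock, flags);
		/* Remember the TRB about to be queued so it can be cancelled later. */
		cmd_trb = xhci->cmd_ring->dequeue;
		ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
		if (ret) {
			spin_unlock_irqrestore(&xhci->lock, flags);
			return ret;
		}
		xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);

		/* Wait XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ) jiffies for the command. */
		timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
				XHCI_CMD_DEFAULT_TIMEOUT);
		if (timeleft <= 0)
			/* Timed out or interrupted: turn the stuck command into a
			 * no-op and restart the command ring. */
			return xhci_cancel_cmd(xhci, NULL, cmd_trb);

		return 0;
	}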
commit f80e866f72
@@ -29,7 +29,7 @@
 #define	PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
			 PORT_RC | PORT_PLC | PORT_PE)

-/* usb 1.1 root hub device descriptor */
+/* USB 3.0 BOS descriptor and a capability descriptor, combined */
 static u8 usb_bos_descriptor [] = {
 	USB_DT_BOS_SIZE,		/*  __u8 bLength, 5 bytes */
 	USB_DT_BOS,			/*  __u8 bDescriptorType */
@@ -422,7 +422,7 @@ void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
 	xhci_writel(xhci, temp, port_array[port_id]);
 }

-void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
+static void xhci_set_remote_wake_mask(struct xhci_hcd *xhci,
 		__le32 __iomem **port_array, int port_id, u16 wake_mask)
 {
 	u32 temp;
@@ -1772,6 +1772,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 {
 	struct pci_dev	*pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
 	struct dev_info	*dev_info, *next;
+	struct xhci_cd	*cur_cd, *next_cd;
 	unsigned long	flags;
 	int size;
 	int i, j, num_ports;
@@ -1795,6 +1796,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
 	xhci_dbg(xhci, "Freed command ring\n");
+	list_for_each_entry_safe(cur_cd, next_cd,
+			&xhci->cancel_cmd_list, cancel_cmd_list) {
+		list_del(&cur_cd->cancel_cmd_list);
+		kfree(cur_cd);
+	}

 	for (i = 1; i < MAX_HC_SLOTS; ++i)
 		xhci_free_virt_device(xhci, i);
@@ -2340,6 +2346,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
+	INIT_LIST_HEAD(&xhci->cancel_cmd_list);
 	xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
 	xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
 			(unsigned long long)xhci->cmd_ring->first_seg->dma);
@@ -280,12 +280,123 @@ static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 /* Ring the host controller doorbell after placing a command on the ring */
 void xhci_ring_cmd_db(struct xhci_hcd *xhci)
 {
+	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
+		return;
+
 	xhci_dbg(xhci, "// Ding dong!\n");
 	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
 	/* Flush PCI posted writes */
 	xhci_readl(xhci, &xhci->dba->doorbell[0]);
 }

+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+{
+	u64 temp_64;
+	int ret;
+
+	xhci_dbg(xhci, "Abort command ring\n");
+
+	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
+		xhci_dbg(xhci, "The command ring isn't running, "
+				"Have the command ring been stopped?\n");
+		return 0;
+	}
+
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	if (!(temp_64 & CMD_RING_RUNNING)) {
+		xhci_dbg(xhci, "Command ring had been stopped\n");
+		return 0;
+	}
+	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+			&xhci->op_regs->cmd_ring);
+
+	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
+	 * time the completion od all xHCI commands, including
+	 * the Command Abort operation. If software doesn't see
+	 * CRR negated in a timely manner (e.g. longer than 5
+	 * seconds), then it should assume that the there are
+	 * larger problems with the xHC and assert HCRST.
+	 */
+	ret = handshake(xhci, &xhci->op_regs->cmd_ring,
+			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+	if (ret < 0) {
+		xhci_err(xhci, "Stopped the command ring failed, "
+				"maybe the host is dead\n");
+		xhci->xhc_state |= XHCI_STATE_DYING;
+		xhci_quiesce(xhci);
+		xhci_halt(xhci);
+		return -ESHUTDOWN;
+	}
+
+	return 0;
+}
+
+static int xhci_queue_cd(struct xhci_hcd *xhci,
+		struct xhci_command *command,
+		union xhci_trb *cmd_trb)
+{
+	struct xhci_cd *cd;
+	cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
+	if (!cd)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&cd->cancel_cmd_list);
+
+	cd->command = command;
+	cd->cmd_trb = cmd_trb;
+	list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
+
+	return 0;
+}
+
+/*
+ * Cancel the command which has issue.
+ *
+ * Some commands may hang due to waiting for acknowledgement from
+ * usb device. It is outside of the xHC's ability to control and
+ * will cause the command ring is blocked. When it occurs software
+ * should intervene to recover the command ring.
+ * See Section 4.6.1.1 and 4.6.1.2
+ */
+int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
+		union xhci_trb *cmd_trb)
+{
+	int retval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_warn(xhci, "Abort the command ring,"
+				" but the xHCI is dead.\n");
+		retval = -ESHUTDOWN;
+		goto fail;
+	}
+
+	/* queue the cmd desriptor to cancel_cmd_list */
+	retval = xhci_queue_cd(xhci, command, cmd_trb);
+	if (retval) {
+		xhci_warn(xhci, "Queuing command descriptor failed.\n");
+		goto fail;
+	}
+
+	/* abort command ring */
+	retval = xhci_abort_cmd_ring(xhci);
+	if (retval) {
+		xhci_err(xhci, "Abort command ring failed\n");
+		if (unlikely(retval == -ESHUTDOWN)) {
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+			xhci_dbg(xhci, "xHCI host controller is dead.\n");
+			return retval;
+		}
+	}
+
+fail:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return retval;
+}
+
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
 		unsigned int slot_id,
 		unsigned int ep_index,
@@ -1059,6 +1170,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
 	}
 }

+/* Complete the command and detele it from the devcie's command queue.
+ */
+static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+		struct xhci_command *command, u32 status)
+{
+	command->status = status;
+	list_del(&command->cmd_list);
+	if (command->completion)
+		complete(command->completion);
+	else
+		xhci_free_command(xhci, command);
+}
+
+
 /* Check to see if a command in the device's command queue matches this one.
  * Signal the completion or free the command, and return 1. Return 0 if the
  * completed command isn't at the head of the command list.
@@ -1077,15 +1202,144 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
 	if (xhci->cmd_ring->dequeue != command->command_trb)
 		return 0;

-	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
-	list_del(&command->cmd_list);
-	if (command->completion)
-		complete(command->completion);
-	else
-		xhci_free_command(xhci, command);
+	xhci_complete_cmd_in_cmd_wait_list(xhci, command,
+			GET_COMP_CODE(le32_to_cpu(event->status)));
 	return 1;
 }

+/*
+ * Finding the command trb need to be cancelled and modifying it to
+ * NO OP command. And if the command is in device's command wait
+ * list, finishing and freeing it.
+ *
+ * If we can't find the command trb, we think it had already been
+ * executed.
+ */
+static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
+{
+	struct xhci_segment *cur_seg;
+	union xhci_trb *cmd_trb;
+	u32 cycle_state;
+
+	if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+		return;
+
+	/* find the current segment of command ring */
+	cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
+			xhci->cmd_ring->dequeue, &cycle_state);
+
+	/* find the command trb matched by cd from command ring */
+	for (cmd_trb = xhci->cmd_ring->dequeue;
+			cmd_trb != xhci->cmd_ring->enqueue;
+			next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
+		/* If the trb is link trb, continue */
+		if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
+			continue;
+
+		if (cur_cd->cmd_trb == cmd_trb) {
+
+			/* If the command in device's command list, we should
+			 * finish it and free the command structure.
+			 */
+			if (cur_cd->command)
+				xhci_complete_cmd_in_cmd_wait_list(xhci,
+					cur_cd->command, COMP_CMD_STOP);
+
+			/* get cycle state from the origin command trb */
+			cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
+				& TRB_CYCLE;
+
+			/* modify the command trb to NO OP command */
+			cmd_trb->generic.field[0] = 0;
+			cmd_trb->generic.field[1] = 0;
+			cmd_trb->generic.field[2] = 0;
+			cmd_trb->generic.field[3] = cpu_to_le32(
+					TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+			break;
+		}
+	}
+}
+
+static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
+{
+	struct xhci_cd *cur_cd, *next_cd;
+
+	if (list_empty(&xhci->cancel_cmd_list))
+		return;
+
+	list_for_each_entry_safe(cur_cd, next_cd,
+			&xhci->cancel_cmd_list, cancel_cmd_list) {
+		xhci_cmd_to_noop(xhci, cur_cd);
+		list_del(&cur_cd->cancel_cmd_list);
+		kfree(cur_cd);
+	}
+}
+
+/*
+ * traversing the cancel_cmd_list. If the command descriptor according
+ * to cmd_trb is found, the function free it and return 1, otherwise
+ * return 0.
+ */
+static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
+		union xhci_trb *cmd_trb)
+{
+	struct xhci_cd *cur_cd, *next_cd;
+
+	if (list_empty(&xhci->cancel_cmd_list))
+		return 0;
+
+	list_for_each_entry_safe(cur_cd, next_cd,
+			&xhci->cancel_cmd_list, cancel_cmd_list) {
+		if (cur_cd->cmd_trb == cmd_trb) {
+			if (cur_cd->command)
+				xhci_complete_cmd_in_cmd_wait_list(xhci,
+					cur_cd->command, COMP_CMD_STOP);
+			list_del(&cur_cd->cancel_cmd_list);
+			kfree(cur_cd);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the
+ * trb pointed by the command ring dequeue pointer is the trb we want to
+ * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will
+ * traverse the cancel_cmd_list to trun the all of the commands according
+ * to command descriptor to NO-OP trb.
+ */
+static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+		int cmd_trb_comp_code)
+{
+	int cur_trb_is_good = 0;
+
+	/* Searching the cmd trb pointed by the command ring dequeue
+	 * pointer in command descriptor list. If it is found, free it.
+	 */
+	cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
+			xhci->cmd_ring->dequeue);
+
+	if (cmd_trb_comp_code == COMP_CMD_ABORT)
+		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+	else if (cmd_trb_comp_code == COMP_CMD_STOP) {
+		/* traversing the cancel_cmd_list and canceling
+		 * the command according to command descriptor
+		 */
+		xhci_cancel_cmd_in_cd_list(xhci);
+
+		xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+		/*
+		 * ring command ring doorbell again to restart the
+		 * command ring
+		 */
+		if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
+			xhci_ring_cmd_db(xhci);
+	}
+	return cur_trb_is_good;
+}
+
 static void handle_cmd_completion(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
 {
@@ -1111,6 +1365,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		xhci->error_bitmask |= 1 << 5;
 		return;
 	}
+
+	if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
+		(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
+		/* If the return value is 0, we think the trb pointed by
+		 * command ring dequeue pointer is a good trb. The good
+		 * trb means we don't want to cancel the trb, but it have
+		 * been stopped by host. So we should handle it normally.
+		 * Otherwise, driver should invoke inc_deq() and return.
+		 */
+		if (handle_stopped_cmd_ring(xhci,
+				GET_COMP_CODE(le32_to_cpu(event->status)))) {
+			inc_deq(xhci, xhci->cmd_ring);
+			return;
+		}
+	}
+
 	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
 		& TRB_TYPE_BITMASK) {
 	case TRB_TYPE(TRB_ENABLE_SLOT):
@@ -2003,6 +2273,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
  */
 static int handle_tx_event(struct xhci_hcd *xhci,
 		struct xhci_transfer_event *event)
+	__releases(&xhci->lock)
+	__acquires(&xhci->lock)
 {
 	struct xhci_virt_device *xdev;
 	struct xhci_virt_ep *ep;
@@ -2580,7 +2852,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 			xhci_err(xhci, "Ring expansion failed\n");
 			return -ENOMEM;
 		}
-	};
+	}

 	if (enqueue_is_link_trb(ep_ring)) {
 		struct xhci_ring *ring = ep_ring;
@@ -51,7 +51,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
 * handshake done). There are two failure modes: "usec" have passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
-static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		      u32 mask, u32 done, int usec)
 {
	u32	result;
@@ -104,9 +104,10 @@ int xhci_halt(struct xhci_hcd *xhci)

	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
-	if (!ret)
+	if (!ret) {
		xhci->xhc_state |= XHCI_STATE_HALTED;
-	else
+		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+	} else
		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
@@ -485,6 +486,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
		return -ENODEV;
	}
	xhci->shared_hcd->state = HC_STATE_RUNNING;
+	xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;

	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);
@@ -1827,7 +1829,7 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
			xhci->num_active_eps);
 }

-unsigned int xhci_get_block_size(struct usb_device *udev)
+static unsigned int xhci_get_block_size(struct usb_device *udev)
 {
	switch (udev->speed) {
	case USB_SPEED_LOW:
@@ -1845,7 +1847,8 @@ unsigned int xhci_get_block_size(struct usb_device *udev)
	}
 }

-unsigned int xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
+static unsigned int
+xhci_get_largest_overhead(struct xhci_interval_bw *interval_bw)
 {
	if (interval_bw->overhead[LS_OVERHEAD_TYPE])
		return LS_OVERHEAD;
@@ -2400,6 +2403,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
	struct completion *cmd_completion;
	u32 *cmd_status;
	struct xhci_virt_device *virt_dev;
+	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
@@ -2445,6 +2449,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
	}
	init_completion(cmd_completion);

+	cmd_trb = xhci->cmd_ring->dequeue;
	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
@@ -2466,14 +2471,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
-			USB_CTRL_SET_TIMEOUT);
+			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
-		/* FIXME cancel the configure endpoint command */
+		/* cancel the configure endpoint command */
+		ret = xhci_cancel_cmd(xhci, command, cmd_trb);
+		if (ret < 0)
+			return ret;
		return -ETIME;
	}

@@ -3422,8 +3430,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
	unsigned long flags;
	int timeleft;
	int ret;
+	union xhci_trb *cmd_trb;

	spin_lock_irqsave(&xhci->lock, flags);
+	cmd_trb = xhci->cmd_ring->dequeue;
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3435,12 +3445,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
-			USB_CTRL_SET_TIMEOUT);
+			XHCI_CMD_DEFAULT_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
-		/* FIXME cancel the enable slot request */
-		return 0;
+		/* cancel the enable slot request */
+		return xhci_cancel_cmd(xhci, NULL, cmd_trb);
	}

	if (!xhci->slot_id) {
@@ -3501,6 +3511,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;
+	union xhci_trb *cmd_trb;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
@@ -3539,6 +3550,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
+	cmd_trb = xhci->cmd_ring->dequeue;
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
					udev->slot_id);
	if (ret) {
@@ -3551,7 +3563,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
-			USB_CTRL_SET_TIMEOUT);
+			XHCI_CMD_DEFAULT_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
@@ -3559,7 +3571,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
-		/* FIXME cancel the address device command */
+		/* cancel the address device command */
+		ret = xhci_cancel_cmd(xhci, NULL, cmd_trb);
+		if (ret < 0)
+			return ret;
		return -ETIME;
	}

@@ -1256,6 +1256,16 @@ struct xhci_td {
 	union xhci_trb		*last_trb;
 };

+/* xHCI command default timeout value */
+#define XHCI_CMD_DEFAULT_TIMEOUT	(5 * HZ)
+
+/* command descriptor */
+struct xhci_cd {
+	struct list_head	cancel_cmd_list;
+	struct xhci_command	*command;
+	union xhci_trb		*cmd_trb;
+};
+
 struct xhci_dequeue_state {
 	struct xhci_segment *new_deq_seg;
 	union xhci_trb *new_deq_ptr;
@@ -1421,6 +1431,11 @@ struct xhci_hcd {
 	/* data structures */
 	struct xhci_device_context_array *dcbaa;
 	struct xhci_ring	*cmd_ring;
+	unsigned int		cmd_ring_state;
+#define CMD_RING_STATE_RUNNING	(1 << 0)
+#define CMD_RING_STATE_ABORTED	(1 << 1)
+#define CMD_RING_STATE_STOPPED	(1 << 2)
+	struct list_head	cancel_cmd_list;
 	unsigned int		cmd_ring_reserved_trbs;
 	struct xhci_ring	*event_ring;
 	struct xhci_erst	erst;
@@ -1698,6 +1713,8 @@ static inline void xhci_unregister_plat(void)

 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
+int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
+		u32 mask, u32 done, int usec);
 void xhci_quiesce(struct xhci_hcd *xhci);
 int xhci_halt(struct xhci_hcd *xhci);
 int xhci_reset(struct xhci_hcd *xhci);
@@ -1788,6 +1805,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
 		unsigned int slot_id, unsigned int ep_index,
 		struct xhci_dequeue_state *deq_state);
 void xhci_stop_endpoint_command_watchdog(unsigned long arg);
+int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
+		union xhci_trb *cmd_trb);
 void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
 		unsigned int ep_index, unsigned int stream_id);