xhci: remove endpoint ring cache

Anurag Kumar Vulisha reported several issues with xhci endpoint
ring caching.

Up to 31 rings are cached per device before any ring is actually freed.
These cached rings are not used by default when a new ring is needed;
they are used only if the driver fails to allocate memory for a ring.
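For orientation, here is that pattern condensed from the code deleted in
the diff below (struct ring, ring_alloc() and ring_free() are illustrative
stand-ins, not the driver's real declarations). Note the cost: each ring
segment is one 4 KiB page (256 TRBs of 16 bytes each), so a full cache
pins at least 31 pages, roughly 124 KiB, per device.

/*
 * Condensed sketch of the removed free-or-cache pattern, based on the
 * code deleted in the diff below. The types and helpers here are
 * simplified stand-ins for the driver's xhci_ring machinery.
 */
#define MAX_RINGS_CACHED	31	/* XHCI_MAX_RINGS_CACHED before this commit */

struct ring;				/* stand-in for struct xhci_ring */
struct ring *ring_alloc(void);		/* stand-in for xhci_ring_alloc() */
void ring_free(struct ring *ring);	/* stand-in for xhci_ring_free() */

struct virt_device {			/* stand-in for struct xhci_virt_device */
	struct ring *ring_cache[MAX_RINGS_CACHED];
	int num_rings_cached;
};

/* On endpoint teardown: park the ring instead of releasing its memory. */
static void free_or_cache_ring(struct virt_device *dev, struct ring *ring)
{
	if (dev->num_rings_cached < MAX_RINGS_CACHED)
		dev->ring_cache[dev->num_rings_cached++] = ring;
	else
		ring_free(ring);
}

/* On endpoint setup: a fresh allocation is always tried first, so the
 * cache is consulted only on the out-of-memory path it helped create. */
static struct ring *alloc_or_reuse_ring(struct virt_device *dev)
{
	struct ring *ring = ring_alloc();

	if (ring)
		return ring;
	if (dev->num_rings_cached == 0)
		return NULL;
	return dev->ring_cache[--dev->num_rings_cached];
}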

The current ring cache is more a reason why we run out of memory than
a help when we actually do.

Anurag Kumar Vulisha tried using the cached rings as the first option
and found new issues with cached ring initialization: cached rings were
first zeroed and then manually reinitialized with link TRBs etc., but
some important bits, such as the cycle toggle bit, were left unset.
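For context: xhci rings mark TRB ownership with a cycle bit, and the link
TRB that closes the ring carries a toggle bit (LINK_TOGGLE in xhci.h)
telling both producer and consumer to flip their cycle state on
wrap-around. A minimal sketch of the producer side, under the simplifying
assumption of a single-segment ring, shows what goes wrong when that bit
is lost:

/*
 * Illustrative sketch (not the driver code) of why the link TRB's
 * toggle bit matters. TRB_CYCLE and LINK_TOGGLE use the xhci.h values;
 * the ring and TRB types are simplified stand-ins.
 */
#include <stdint.h>

#define TRB_CYCLE	(1u << 0)	/* ownership bit, compared against cycle state */
#define LINK_TOGGLE	(1u << 1)	/* flip cycle state when following this link */

struct trb {
	uint32_t control;
};

struct ring {
	struct trb *enqueue;		/* next TRB the producer will write */
	struct trb *link;		/* link TRB at the end of the ring */
	struct trb *first;		/* first TRB of the first segment */
	uint32_t cycle_state;		/* producer's current cycle, 0 or 1 */
};

static void advance_enqueue(struct ring *ring)
{
	if (ring->enqueue != ring->link) {
		ring->enqueue++;
		return;
	}
	/* Hand the link TRB to the consumer with the current cycle bit. */
	ring->link->control = (ring->link->control & ~TRB_CYCLE) |
			      ring->cycle_state;
	/*
	 * If LINK_TOGGLE was lost (as when a zeroed cached ring was
	 * reinitialized without it), the cycle state never flips on
	 * wrap-around, and producer and consumer soon disagree about
	 * which TRBs are valid.
	 */
	if (ring->link->control & LINK_TOGGLE)
		ring->cycle_state ^= 1;
	ring->enqueue = ring->first;
}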

Remove the ring cache completely, as it is a faulty premature
optimization that eats memory.

Reported-by: Anurag Kumar Vulisha <anuragku@xilinx.com>
Tested-by: Anurag Kumar Vulisha <anuragku@xilinx.com>
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Commit c5628a2af8 (parent d590c23111)
Mathias Nyman, 2017-06-15 11:55:42 +03:00; committed by Greg Kroah-Hartman
3 changed files with 15 additions and 89 deletions

drivers/usb/host/xhci-mem.c

@@ -407,64 +407,17 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 	return NULL;
 }
 
-void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index)
 {
-	int rings_cached;
-
-	rings_cached = virt_dev->num_rings_cached;
-	if (rings_cached < XHCI_MAX_RINGS_CACHED) {
-		virt_dev->ring_cache[rings_cached] =
-			virt_dev->eps[ep_index].ring;
-		virt_dev->num_rings_cached++;
-		xhci_dbg(xhci, "Cached old ring, "
-				"%d ring%s cached\n",
-				virt_dev->num_rings_cached,
-				(virt_dev->num_rings_cached > 1) ? "s" : "");
-	} else {
-		xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
-		xhci_dbg(xhci, "Ring cache full (%d rings), "
-				"freeing ring\n",
-				virt_dev->num_rings_cached);
-	}
+	xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
 	virt_dev->eps[ep_index].ring = NULL;
 }
 
-/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
- * pointers to the beginning of the ring.
- */
-static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
-		struct xhci_ring *ring, unsigned int cycle_state,
-		enum xhci_ring_type type)
-{
-	struct xhci_segment *seg = ring->first_seg;
-	int i;
-
-	do {
-		memset(seg->trbs, 0,
-				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
-		if (cycle_state == 0) {
-			for (i = 0; i < TRBS_PER_SEGMENT; i++)
-				seg->trbs[i].link.control |=
-					cpu_to_le32(TRB_CYCLE);
-		}
-		/* All endpoint rings have link TRBs */
-		xhci_link_segments(xhci, seg, seg->next, type);
-		seg = seg->next;
-	} while (seg != ring->first_seg);
-	ring->type = type;
-	xhci_initialize_ring_info(ring, cycle_state);
-	/* td list should be empty since all URBs have been cancelled,
-	 * but just in case...
-	 */
-	INIT_LIST_HEAD(&ring->td_list);
-}
-
 /*
  * Expand an existing ring.
- * Look for a cached ring or allocate a new ring which has same segment numbers
- * and link the two rings.
+ * Allocate a new ring which has same segment numbers and link the two rings.
  */
 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		unsigned int num_trbs, gfp_t flags)
@@ -968,12 +921,6 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
 	/* If necessary, update the number of active TTs on this root port */
 	xhci_update_tt_active_eps(xhci, dev, old_active_eps);
 
-	if (dev->ring_cache) {
-		for (i = 0; i < dev->num_rings_cached; i++)
-			xhci_ring_free(xhci, dev->ring_cache[i]);
-		kfree(dev->ring_cache);
-	}
-
 	if (dev->in_ctx)
 		xhci_free_container_ctx(xhci, dev->in_ctx);
 	if (dev->out_ctx)
@@ -1062,14 +1009,6 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	if (!dev->eps[0].ring)
 		goto fail;
 
-	/* Allocate pointers to the ring cache */
-	dev->ring_cache = kzalloc(
-			sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
-			flags);
-	if (!dev->ring_cache)
-		goto fail;
-	dev->num_rings_cached = 0;
-
 	dev->udev = udev;
 
 	/* Point to output device context in dcbaa. */
@@ -1537,17 +1476,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	/* Set up the endpoint ring */
 	virt_dev->eps[ep_index].new_ring =
 		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring) {
-		/* Attempt to use the ring cache */
-		if (virt_dev->num_rings_cached == 0)
-			return -ENOMEM;
-		virt_dev->num_rings_cached--;
-		virt_dev->eps[ep_index].new_ring =
-			virt_dev->ring_cache[virt_dev->num_rings_cached];
-		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-					1, ring_type);
-	}
+	if (!virt_dev->eps[ep_index].new_ring)
+		return -ENOMEM;
+
 	virt_dev->eps[ep_index].skip = false;
 	ep_ring = virt_dev->eps[ep_index].new_ring;

drivers/usb/host/xhci.c

@@ -1695,8 +1695,7 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
 	if (xhci->quirks & XHCI_MTK_HOST) {
 		ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
 		if (ret < 0) {
-			xhci_free_or_cache_endpoint_ring(xhci,
-					virt_dev, ep_index);
+			xhci_free_endpoint_ring(xhci, virt_dev, ep_index);
 			return ret;
 		}
 	}
@@ -2720,23 +2719,23 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 	for (i = 1; i < 31; i++) {
 		if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
 		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) {
-			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
 			xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
 		}
 	}
 	xhci_zero_in_ctx(xhci, virt_dev);
 	/*
 	 * Install any rings for completely new endpoints or changed endpoints,
-	 * and free or cache any old rings from changed endpoints.
+	 * and free any old rings from changed endpoints.
 	 */
 	for (i = 1; i < 31; i++) {
 		if (!virt_dev->eps[i].new_ring)
 			continue;
-		/* Only cache or free the old ring if it exists.
+		/* Only free the old ring if it exists.
 		 * It may not if this is the first add of an endpoint.
 		 */
 		if (virt_dev->eps[i].ring) {
-			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
 		}
 		xhci_check_bw_drop_ep_streams(xhci, virt_dev, i);
 		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
@@ -3336,7 +3335,7 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
  *
  * Wait for the Reset Device command to finish. Remove all structures
  * associated with the endpoints that were disabled. Clear the input device
- * structure? Cache the rings? Reset the control endpoint 0 max packet size?
+ * structure? Reset the control endpoint 0 max packet size?
  *
  * If the virt_dev to be reset does not exist or does not match the udev,
  * it means the device is lost, possibly due to the xHC restore error and
@@ -3466,7 +3465,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
 		spin_unlock_irqrestore(&xhci->lock, flags);
 	}
 
-	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
+	/* Everything but endpoint 0 is disabled, so free the rings. */
 	last_freed_endpoint = 1;
 	for (i = 1; i < 31; i++) {
 		struct xhci_virt_ep *ep = &virt_dev->eps[i];
@@ -3480,7 +3479,7 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
 		}
 
 		if (ep->ring) {
-			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+			xhci_free_endpoint_ring(xhci, virt_dev, i);
 			last_freed_endpoint = i;
 		}
 		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))

drivers/usb/host/xhci.h

@@ -992,10 +992,6 @@ struct xhci_virt_device {
 	struct xhci_container_ctx	*out_ctx;
 	/* Used for addressing devices and configuration changes */
 	struct xhci_container_ctx	*in_ctx;
-	/* Rings saved to ensure old alt settings can be re-instated */
-	struct xhci_ring		**ring_cache;
-	int				num_rings_cached;
-#define	XHCI_MAX_RINGS_CACHED	31
 	struct xhci_virt_ep		eps[31];
 	u8				fake_port;
 	u8				real_port;
@@ -1960,7 +1956,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
 void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
 int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		unsigned int num_trbs, gfp_t flags);
-void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index);
 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,