staging: fsl-dpaa2/eth: Fix potential endless loop

We incorrectly assumed that dpaa2_io_release() could only
return -EBUSY as an error code, when in fact it can also
fail if some of its arguments are invalid.

Make sure we only retry the operation while the portal is
busy and abort on all other errors; otherwise we risk
entering an endless loop.

Signed-off-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
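
For reference, the retry-only-while-busy pattern the patch applies looks
roughly like the sketch below (a minimal illustration with placeholder names;
release_to_pool() stands in for the real QBMan release call and is not part
of the driver API):

	int err;

	/* -EBUSY indicates portal contention and is worth retrying;
	 * any other error will never clear, so spinning on it would
	 * loop forever.
	 */
	do {
		err = release_to_pool(bpid, buf_array, count);
	} while (err == -EBUSY);

	if (err) {
		/* Non-busy failure: free the buffers ourselves
		 * instead of retrying indefinitely.
		 */
		free_bufs(priv, buf_array, count);
		return 0;
	}

	return count;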
Ioana Radulescu 2017-10-11 08:29:43 -05:00 committed by Greg Kroah-Hartman
parent ce10d7b4e8
commit 87eb55e418
1 changed file with 35 additions and 23 deletions


@@ -718,6 +718,23 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
/* Free buffers acquired from the buffer pool or which were meant to
* be released in the pool
*/
static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
{
struct device *dev = priv->net_dev->dev.parent;
void *vaddr;
int i;
for (i = 0; i < count; i++) {
vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
DMA_BIDIRECTIONAL);
skb_free_frag(vaddr);
}
}
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
@@ -727,7 +744,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
void *buf;
dma_addr_t addr;
int i;
int i, err;
for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
/* Allocate buffer visible to WRIOP + skb shared info +
@@ -754,22 +771,27 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
}
release_bufs:
/* In case the portal is busy, retry until successful.
* The buffer release function would only fail if the QBMan portal
* was busy, which implies portal contention (i.e. more CPUs than
* portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
* there is little we can realistically do, short of giving up -
* in which case we'd risk depleting the buffer pool and never again
* receiving the Rx interrupt which would kick-start the refill logic.
* So just keep retrying, at the risk of being moved to ksoftirqd.
*/
while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
/* In case the portal is busy, retry until successful */
while ((err = dpaa2_io_service_release(NULL, bpid,
buf_array, i)) == -EBUSY)
cpu_relax();
/* If release command failed, clean up and bail out;
* not much else we can do about it
*/
if (err) {
free_bufs(priv, buf_array, i);
return 0;
}
return i;
err_map:
skb_free_frag(buf);
err_alloc:
/* If we managed to allocate at least some buffers,
* release them to hardware
*/
if (i)
goto release_bufs;
@@ -811,10 +833,8 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
*/
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
void *vaddr;
int ret, i;
int ret;
do {
ret = dpaa2_io_service_acquire(NULL, priv->bpid,
@@ -823,15 +843,7 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
for (i = 0; i < ret; i++) {
/* Same logic as on regular Rx path */
vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
buf_array[i]);
dma_unmap_single(dev, buf_array[i],
DPAA2_ETH_RX_BUF_SIZE,
DMA_FROM_DEVICE);
skb_free_frag(vaddr);
}
free_bufs(priv, buf_array, ret);
} while (ret);
}