dmaengine fixes for 4.14-rc5

Here are fixes for this round
 - fix spinlock usage and fifo response for altera driver
 - fix ti crossbar race condition
 - fix edma memcpy align

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJZ4nVCAAoJEHwUBw8lI4NH85UQAIdhLBxuJ4Np6VITMzKt0d0g
xch/lPJKTRC9gQWrHXIiOeRmA4NXpqNfHxhuj1qcOlJzt1Jgf+QxH6WF3QBj1EZX
3Z2OPYXknF/5SPG6Ovwvf9LADGUq/l8SQoVwFsnRLXPw00S/+yIc7sNBdLqQgdpM
kdFGy4ZvqE+jYzzUPIJWCUG8AdvYRGhT0Hcbt8dTV5u6XaRd4hIcbGqUqSadIclx
yuWrQilEhzNqbwUZpBzLKZZfZmA7+8VUUrmL1ONJ1/8hrASo8GWSxpj4+RzbUPzH
57VNUCAF1Z3yW4nd9QDE0LTs8jHestYIK8bfa0RW0tDWgwXdnqOsJfXsRgCk6bFc
envCn5d3ws0i0+7GrDL6dbstHdP9m6XejKSQMUNrZ6vunsPYrrmuelwobaoRfmWN
BMM6ShwpsKKrzK3FvCuNcqehSBSjKJR00nciqn2XTyUtPv2UOPy/cAn4+po6MW+U
HPgzjq10VOsA6jdpJhoU1c6gDRWTRDuEVeKIALwTyqXKs9Q2Fl73vifAlhDy0nCu
r5/YPmCEbt3lmku4STUPUqokmAx7NC7zZGcFscTO2ly8wRydMwxP1pJ3Vz4edaF+
JNkZqdge9j8AxgnFjT3eGabjHPpGogBqgph/m4VQIC14rETttPVYEL4ILNk3Gd55
Fx5xvJCvZznmzmExHsXt
=d2EB
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "Here are fixes for this round

   - fix spinlock usage and fifo response for altera driver

   - fix ti crossbar race condition

   - fix edma memcpy align"

* tag 'dmaengine-fix-4.14-rc5' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: altera: fix spinlock usage
  dmaengine: altera: fix response FIFO emptying
  dmaengine: ti-dma-crossbar: Fix possible race condition with dma_inuse
  dmaengine: edma: Align the memcpy acnt array size with the transfer
commit 7a23c5abb9
--- a/drivers/dma/altera-msgdma.c
+++ b/drivers/dma/altera-msgdma.c
@@ -212,11 +212,12 @@ struct msgdma_device {
 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
 {
         struct msgdma_sw_desc *desc;
+        unsigned long flags;
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
         desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
         list_del(&desc->node);
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
 
         INIT_LIST_HEAD(&desc->tx_list);
 
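Why the irqsave variants: spin_lock_bh() only disables softirqs, so if the same lock is ever contended from hard-IRQ context on the same CPU, the handler can interrupt a lock holder and spin forever on a lock its own CPU already owns. The switch to spin_lock_irqsave() throughout this driver closes that window. A minimal kernel-C sketch of the two contexts follows; demo_dev, demo_irq_handler() and demo_touch_state() are hypothetical names, not the driver's actual call chain.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Hypothetical device; only the lock matters for this sketch. */
struct demo_dev {
        spinlock_t lock;
};

/* Hard-IRQ context: runs with interrupts off on this CPU. */
static irqreturn_t demo_irq_handler(int irq, void *data)
{
        struct demo_dev *d = data;

        spin_lock(&d->lock);
        /* ... touch shared state ... */
        spin_unlock(&d->lock);
        return IRQ_HANDLED;
}

/* Process context: spin_lock_bh() would leave hard IRQs enabled, so
 * demo_irq_handler() could fire here and deadlock on a lock its own
 * CPU holds.  spin_lock_irqsave() disables interrupts as well. */
static void demo_touch_state(struct demo_dev *d)
{
        unsigned long flags;

        spin_lock_irqsave(&d->lock, flags);
        /* ... touch shared state ... */
        spin_unlock_irqrestore(&d->lock, flags);
}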
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
         struct msgdma_device *mdev = to_mdev(tx->chan);
         struct msgdma_sw_desc *new;
         dma_cookie_t cookie;
+        unsigned long flags;
 
         new = tx_to_desc(tx);
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
         cookie = dma_cookie_assign(tx);
 
         list_add_tail(&new->node, &mdev->pending_list);
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
 
         return cookie;
 }
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
         struct msgdma_extended_desc *desc;
         size_t copy;
         u32 desc_cnt;
+        unsigned long irqflags;
 
         desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, irqflags);
         if (desc_cnt > mdev->desc_free_cnt) {
                 spin_unlock_bh(&mdev->lock);
                 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                 return NULL;
         }
         mdev->desc_free_cnt -= desc_cnt;
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, irqflags);
 
         do {
                 /* Allocate and populate the descriptor */
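One detail worth noting in the hunk above (and again in the msgdma_prep_slave_sg() hunk below): the early-return path still drops the lock with spin_unlock_bh() even though it is now taken with spin_lock_irqsave(), which would re-enable softirqs while leaving hard IRQs disabled. This pull does not touch that line; a matched pair would look like the sketch below, where demo_reserve() is a hypothetical stand-in for the reservation logic, not the driver's code.

#include <linux/spinlock.h>

/* Hypothetical error-path sketch: the unlock variant must mirror the
 * lock variant taken at entry. */
static void *demo_reserve(spinlock_t *lock, unsigned int want,
                          unsigned int *free_cnt)
{
        unsigned long irqflags;

        spin_lock_irqsave(lock, irqflags);
        if (want > *free_cnt) {
                /* not spin_unlock_bh(): that would re-enable softirqs
                 * while leaving hard IRQs disabled */
                spin_unlock_irqrestore(lock, irqflags);
                return NULL;
        }
        *free_cnt -= want;
        spin_unlock_irqrestore(lock, irqflags);
        return free_cnt;        /* placeholder "reservation" */
}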
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
         u32 desc_cnt = 0, i;
         struct scatterlist *sg;
         u32 stride;
+        unsigned long irqflags;
 
         for_each_sg(sgl, sg, sg_len, i)
                 desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, irqflags);
         if (desc_cnt > mdev->desc_free_cnt) {
                 spin_unlock_bh(&mdev->lock);
                 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                 return NULL;
         }
         mdev->desc_free_cnt -= desc_cnt;
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, irqflags);
 
         avail = sg_dma_len(sgl);
 
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev)
 static void msgdma_issue_pending(struct dma_chan *chan)
 {
         struct msgdma_device *mdev = to_mdev(chan);
+        unsigned long flags;
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
         msgdma_start_transfer(mdev);
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev)
 static void msgdma_free_chan_resources(struct dma_chan *dchan)
 {
         struct msgdma_device *mdev = to_mdev(dchan);
+        unsigned long flags;
 
-        spin_lock_bh(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
         msgdma_free_descriptors(mdev);
-        spin_unlock_bh(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
         kfree(mdev->sw_desq);
 }
 
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data)
         u32 count;
         u32 __maybe_unused size;
         u32 __maybe_unused status;
+        unsigned long flags;
 
-        spin_lock(&mdev->lock);
+        spin_lock_irqsave(&mdev->lock, flags);
 
         /* Read number of responses that are available */
         count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data)
                  * bits. So we need to just drop these values.
                  */
                 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
-                status = ioread32(mdev->resp - MSGDMA_RESP_STATUS);
+                status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);
 
                 msgdma_complete_descriptor(mdev);
                 msgdma_chan_desc_cleanup(mdev);
         }
 
-        spin_unlock(&mdev->lock);
+        spin_unlock_irqrestore(&mdev->lock, flags);
 }
 
 /**
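The one-character change from "resp -" to "resp +" above is the response FIFO fix: each response is consumed by reading both of its longwords, and with the negative offset the status word was read from below the response region, so entries were never actually popped off the hardware FIFO. A minimal sketch of the pop sequence follows; the 0x00/0x04 offsets are assumptions based on the driver's register map, not verified here.

#include <linux/io.h>
#include <linux/types.h>

/* Offsets as assumed from the driver's register map. */
#define MSGDMA_RESP_BYTES_TRANSFERRED   0x00
#define MSGDMA_RESP_STATUS              0x04

/* Reading both longwords of a response is what pops one entry off
 * the hardware response FIFO; note both reads use "resp +". */
static u32 msgdma_pop_response(void __iomem *resp)
{
        (void)ioread32(resp + MSGDMA_RESP_BYTES_TRANSFERRED);
        return ioread32(resp + MSGDMA_RESP_STATUS);
}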
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
         struct edma_desc *edesc;
         struct device *dev = chan->device->dev;
         struct edma_chan *echan = to_edma_chan(chan);
-        unsigned int width, pset_len;
+        unsigned int width, pset_len, array_size;
 
         if (unlikely(!echan || !len))
                 return NULL;
 
+        /* Align the array size (acnt block) with the transfer properties */
+        switch (__ffs((src | dest | len))) {
+        case 0:
+                array_size = SZ_32K - 1;
+                break;
+        case 1:
+                array_size = SZ_32K - 2;
+                break;
+        default:
+                array_size = SZ_32K - 4;
+                break;
+        }
+
         if (len < SZ_64K) {
                 /*
                  * Transfer size less than 64K can be handled with one paRAM
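The switch added above derives the acnt (array) block size from the transfer's common alignment: __ffs(src | dest | len) returns the index of the lowest bit set across both addresses and the length, i.e. the largest power-of-two alignment they all share. The driver then picks the largest size under its 32767-byte (SZ_32K - 1) ceiling that is a multiple of that alignment, treating anything 4-byte-aligned or better as 4: 32767 for byte, 32766 for 2-byte, 32764 otherwise, so every acnt-sized chunk preserves the buffers' alignment. A userspace re-creation for experimenting; lowest_set_bit() and pick_array_size() are illustrative names, not kernel API.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's __ffs(): index of the lowest
 * set bit.  Undefined for v == 0, like the original. */
static unsigned int lowest_set_bit(uint64_t v)
{
        return (unsigned int)__builtin_ctzll(v);
}

static unsigned int pick_array_size(uint64_t src, uint64_t dst, uint64_t len)
{
        switch (lowest_set_bit(src | dst | len)) {
        case 0:  return 32768 - 1;      /* byte-aligned: SZ_32K - 1 */
        case 1:  return 32768 - 2;      /* 2-byte aligned: SZ_32K - 2 */
        default: return 32768 - 4;      /* 4-byte or better: SZ_32K - 4 */
        }
}

int main(void)
{
        /* 4-byte-aligned addresses and length -> 32764-byte acnt blocks,
         * so every chunk keeps the 4-byte alignment of src and dst. */
        printf("%u\n", pick_array_size(0x1000, 0x2000, 65536)); /* 32764 */
        /* An odd source address forces byte-sized granularity. */
        printf("%u\n", pick_array_size(0x1001, 0x2000, 65536)); /* 32767 */
        return 0;
}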
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
                  * When the full_length is multibple of 32767 one slot can be
                  * used to complete the transfer.
                  */
-                width = SZ_32K - 1;
+                width = array_size;
                 pset_len = rounddown(len, width);
                 /* One slot is enough for lengths multiple of (SZ_32K -1) */
                 if (unlikely(pset_len == len))
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
         }
         dest += pset_len;
         src += pset_len;
-        pset_len = width = len % (SZ_32K - 1);
+        pset_len = width = len % array_size;
 
         ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
                                width, pset_len, DMA_MEM_TO_MEM);
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
         mutex_lock(&xbar->mutex);
         map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
                                             xbar->dma_requests);
-        mutex_unlock(&xbar->mutex);
         if (map->xbar_out == xbar->dma_requests) {
+                mutex_unlock(&xbar->mutex);
                 dev_err(&pdev->dev, "Run out of free DMA requests\n");
                 kfree(map);
                 return ERR_PTR(-ENOMEM);
         }
         set_bit(map->xbar_out, xbar->dma_inuse);
+        mutex_unlock(&xbar->mutex);
 
         map->xbar_in = (u16)dma_spec->args[0];
 
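The crossbar hunk closes a classic check-then-act race: with the mutex released between find_first_zero_bit() and set_bit(), two concurrent requesters could both observe the same bit clear and claim the same DMA request line. Holding the mutex from the search through set_bit() makes "find a free line and claim it" one atomic step. A self-contained sketch of the corrected pattern; xbar_claim_line() is an illustrative helper, not the driver's code.

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>

/* Illustrative helper: search and claim under one critical section,
 * so no other requester can see the bit clear between the two steps. */
static int xbar_claim_line(struct mutex *lock, unsigned long *inuse,
                           unsigned long nr_lines)
{
        unsigned long line;

        mutex_lock(lock);
        line = find_first_zero_bit(inuse, nr_lines);
        if (line == nr_lines) {
                mutex_unlock(lock);
                return -ENOMEM;         /* no free request line */
        }
        set_bit(line, inuse);           /* claimed before the lock drops */
        mutex_unlock(lock);

        return (int)line;
}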