dmaengine: dw: fix cyclic transfer callbacks

Cyclic transfer callbacks rely on block completion interrupts which were
disabled in commit ff7b05f29f ("dmaengine/dw_dmac: Don't handle block
interrupts").  This re-enables block interrupts so the cyclic callbacks
can work.  Other transfer types are not affected as they set the INT_EN
bit only on the last block.
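
For reference, a minimal sketch (not part of this patch) of how a user of the driver's private cyclic API registers the per-period callback that these block interrupts drive; the dw_dma_cyclic_* declarations and the helper below are assumptions for illustration only:

#include <linux/dmaengine.h>
#include <linux/err.h>
/*
 * dw_dma_cyclic_prep(), dw_dma_cyclic_start() and struct dw_cyclic_desc
 * belong to this driver's private cyclic API; declarations assumed here.
 */

static void my_period_done(void *param)
{
	/* Invoked from the driver's tasklet once per completed period. */
}

static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/*
	 * period_callback is what dwc_handle_cyclic() calls on each
	 * block-completion interrupt re-enabled by this patch.
	 */
	cdesc->period_callback = my_period_done;
	cdesc->period_callback_param = NULL;

	return dw_dma_cyclic_start(chan);
}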

Fixes: ff7b05f29f ("dmaengine/dw_dmac: Don't handle block interrupts")
Signed-off-by: Mans Rullgard <mans@mansr.com>
Reviewed-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Cc: <stable@vger.kernel.org>

Author:    Mans Rullgard <mans@mansr.com>, 2016-01-11 13:04:29 +00:00
Committer: Vinod Koul <vinod.koul@intel.com>
Commit:    2895b2cad6 (parent df3bb8a0e6)
1 changed file with 15 additions and 6 deletions


@@ -156,6 +156,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 	/* Enable interrupts */
 	channel_set_bit(dw, MASK.XFER, dwc->mask);
+	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
 	dwc->initialized = true;
@@ -536,16 +537,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
 /* Called with dwc->lock held and all DMAC interrupts disabled */
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-		u32 status_err, u32 status_xfer)
+		u32 status_block, u32 status_err, u32 status_xfer)
 {
 	unsigned long flags;
 
-	if (dwc->mask) {
+	if (status_block & dwc->mask) {
 		void (*callback)(void *param);
 		void *callback_param;
 
 		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
 				channel_readl(dwc, LLP));
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 
 		callback = dwc->cdesc->period_callback;
 		callback_param = dwc->cdesc->period_callback_param;
@@ -577,6 +579,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		channel_writel(dwc, CTL_LO, 0);
 		channel_writel(dwc, CTL_HI, 0);
 
+		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 		dma_writel(dw, CLEAR.ERROR, dwc->mask);
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -593,10 +596,12 @@ static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
 	struct dw_dma_chan *dwc;
+	u32 status_block;
 	u32 status_xfer;
 	u32 status_err;
 	int i;
 
+	status_block = dma_readl(dw, RAW.BLOCK);
 	status_xfer = dma_readl(dw, RAW.XFER);
 	status_err = dma_readl(dw, RAW.ERROR);
@@ -605,7 +610,8 @@ static void dw_dma_tasklet(unsigned long data)
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
+			dwc_handle_cyclic(dw, dwc, status_block, status_err,
+					status_xfer);
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
 		else if (status_xfer & (1 << i))
@@ -616,6 +622,7 @@ static void dw_dma_tasklet(unsigned long data)
 	 * Re-enable interrupts.
 	 */
 	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
@@ -635,6 +642,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 	 * softirq handler.
 	 */
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
 
 	status = dma_readl(dw, STATUS_INT);
@@ -645,6 +653,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
 		/* Try to recover */
 		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
+		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
 		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
@@ -1111,6 +1120,7 @@ static void dw_dma_off(struct dw_dma *dw)
 	dma_writel(dw, CFG, 0);
 
 	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
+	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
 	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
@@ -1216,6 +1226,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	/* Disable interrupts */
 	channel_clear_bit(dw, MASK.XFER, dwc->mask);
+	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1458,6 +1469,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	dwc_chan_disable(dw, dwc);
 
+	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -1546,9 +1558,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 	/* Force dma off, just in case */
 	dw_dma_off(dw);
 
-	/* Disable BLOCK interrupts as well */
-	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
-
 	/* Create a pool of consistent memory blocks for hardware descriptors */
 	dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
					 sizeof(struct dw_desc), 4, 0);