mirror of https://gitee.com/openkylin/linux.git
dmaengine: virt-dma: Add missing locking
Originally freeing descriptors was split into a locked and an unlocked
part. The locked part in vchan_get_all_descriptors() collected all
descriptors on a separate list_head. This was done to allow iterating
over that new list in vchan_dma_desc_free_list() without a lock held.
This became broken in commit 13bb26ae88 ("dmaengine: virt-dma: don't
always free descriptor upon completion"). With this commit
vchan_dma_desc_free_list() no longer exclusively operates on the
separate list, but starts to put descriptors which can be reused back on
&vc->desc_allocated. This list operation should have been locked, but
wasn't.
In the meantime drivers started to call vchan_dma_desc_free_list() with
their lock held so that we now have the situation that
vchan_dma_desc_free_list() is called locked from some drivers and
unlocked from others.
To clean this up we have to do two things:
1. Add missing locking in vchan_dma_desc_free_list()
2. Make sure drivers call vchan_dma_desc_free_list() unlocked
This needs to be done atomically, so in this patch the locking is added
and all drivers are fixed.
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Reviewed-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Tested-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Green Wan <green.wan@sifive.com>
Tested-by: Green Wan <green.wan@sifive.com>
Link: https://lore.kernel.org/r/20191216105328.15198-3-s.hauer@pengutronix.de
Signed-off-by: Vinod Koul <vkoul@kernel.org>
This commit is contained in:
parent
5c8aacbbb3
commit
51fe9cd2bd
|
@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
|
|||
|
||||
vchan_get_all_descriptors(&chan->vc, &head);
|
||||
|
||||
/*
|
||||
* As vchan_dma_desc_free_list can access to desc_allocated list
|
||||
* we need to call it in vc.lock context.
|
||||
*/
|
||||
vchan_dma_desc_free_list(&chan->vc, &head);
|
||||
|
||||
spin_unlock_irqrestore(&chan->vc.lock, flags);
|
||||
|
||||
vchan_dma_desc_free_list(&chan->vc, &head);
|
||||
|
||||
dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
|
|||
|
||||
spin_lock_irqsave(&c->vc.lock, flags);
|
||||
vchan_get_all_descriptors(&c->vc, &head);
|
||||
vchan_dma_desc_free_list(&c->vc, &head);
|
||||
spin_unlock_irqrestore(&c->vc.lock, flags);
|
||||
|
||||
vchan_dma_desc_free_list(&c->vc, &head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
|
|||
}
|
||||
|
||||
vchan_get_all_descriptors(&vchan->vc, &head);
|
||||
vchan_dma_desc_free_list(&vchan->vc, &head);
|
||||
|
||||
spin_unlock_irqrestore(&vchan->vc.lock, flags);
|
||||
|
||||
vchan_dma_desc_free_list(&vchan->vc, &head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
|
|||
s3c24xx_dma_start_next_sg(s3cchan, txd);
|
||||
}
|
||||
|
||||
static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
|
||||
struct s3c24xx_dma_chan *s3cchan)
|
||||
{
|
||||
LIST_HEAD(head);
|
||||
|
||||
vchan_get_all_descriptors(&s3cchan->vc, &head);
|
||||
vchan_dma_desc_free_list(&s3cchan->vc, &head);
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to allocate a physical channel. When successful, assign it to
|
||||
* this virtual channel, and initiate the next descriptor. The
|
||||
|
@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
|
|||
{
|
||||
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
|
||||
struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
|
||||
LIST_HEAD(head);
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&s3cchan->vc.lock, flags);
|
||||
|
||||
|
@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
|
|||
}
|
||||
|
||||
/* Dequeue jobs not yet fired as well */
|
||||
s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
|
||||
|
||||
vchan_get_all_descriptors(&s3cchan->vc, &head);
|
||||
|
||||
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
|
||||
|
||||
vchan_dma_desc_free_list(&s3cchan->vc, &head);
|
||||
|
||||
return 0;
|
||||
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
|
||||
|
||||
|
|
|
@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
|
|||
kfree(chan->desc);
|
||||
chan->desc = NULL;
|
||||
vchan_get_all_descriptors(&chan->vchan, &head);
|
||||
vchan_dma_desc_free_list(&chan->vchan, &head);
|
||||
sf_pdma_disclaim_chan(chan);
|
||||
spin_unlock_irqrestore(&chan->vchan.lock, flags);
|
||||
vchan_dma_desc_free_list(&chan->vchan, &head);
|
||||
}
|
||||
|
||||
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
|
||||
|
@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan)
|
|||
chan->desc = NULL;
|
||||
chan->xfer_err = false;
|
||||
vchan_get_all_descriptors(&chan->vchan, &head);
|
||||
vchan_dma_desc_free_list(&chan->vchan, &head);
|
||||
spin_unlock_irqrestore(&chan->vchan.lock, flags);
|
||||
vchan_dma_desc_free_list(&chan->vchan, &head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -885,12 +885,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan)
|
|||
}
|
||||
|
||||
spin_lock_irqsave(&vchan->vc.lock, flags);
|
||||
vchan_dma_desc_free_list(&vchan->vc, &head);
|
||||
/* Clear these so the vchan is usable again */
|
||||
vchan->processing = NULL;
|
||||
vchan->pchan = NULL;
|
||||
spin_unlock_irqrestore(&vchan->vc.lock, flags);
|
||||
|
||||
vchan_dma_desc_free_list(&vchan->vc, &head);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -116,7 +116,11 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
|
|||
|
||||
list_for_each_entry_safe(vd, _vd, head, node) {
|
||||
if (dmaengine_desc_test_reuse(&vd->tx)) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&vc->lock, flags);
|
||||
list_move_tail(&vd->node, &vc->desc_allocated);
|
||||
spin_unlock_irqrestore(&vc->lock, flags);
|
||||
} else {
|
||||
dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
|
||||
list_del(&vd->node);
|
||||
|
|
Loading…
Reference in New Issue