Merge branch 'topic/virt-dma' into for-linus
commit b8e1a96359
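
This merge pulls the virt-dma topic branch into for-linus. Across the driver
diffs below, the terminate_all callbacks stop freeing the in-flight descriptor
directly (pl08x_desc_free(), bcm2835_dma_desc_free(), edma_desc_free(), and
friends) and instead hand it to the new vchan_terminate_vdesc() helper; each
driver also gains a device_synchronize callback backed by vchan_synchronize().
The virt-dma core itself gains vchan_vdesc_fini(), which centralizes the
free-or-reuse decision, and a vd_terminated slot that holds a terminated
descriptor until it is safe to release it.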
drivers/dma/amba-pl08x.c

@@ -2182,7 +2182,7 @@ static int pl08x_terminate_all(struct dma_chan *chan)
         }
         /* Dequeue jobs and free LLIs */
         if (plchan->at) {
-                pl08x_desc_free(&plchan->at->vd);
+                vchan_terminate_vdesc(&plchan->at->vd);
                 plchan->at = NULL;
         }
         /* Dequeue jobs not yet fired as well */
@@ -2193,6 +2193,13 @@ static int pl08x_terminate_all(struct dma_chan *chan)
         return 0;
 }
 
+static void pl08x_synchronize(struct dma_chan *chan)
+{
+        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+
+        vchan_synchronize(&plchan->vc);
+}
+
 static int pl08x_pause(struct dma_chan *chan)
 {
         struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -2773,6 +2780,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
         pl08x->memcpy.device_pause = pl08x_pause;
         pl08x->memcpy.device_resume = pl08x_resume;
         pl08x->memcpy.device_terminate_all = pl08x_terminate_all;
+        pl08x->memcpy.device_synchronize = pl08x_synchronize;
         pl08x->memcpy.src_addr_widths = PL80X_DMA_BUSWIDTHS;
         pl08x->memcpy.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
         pl08x->memcpy.directions = BIT(DMA_MEM_TO_MEM);
@@ -2802,6 +2810,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
         pl08x->slave.device_pause = pl08x_pause;
         pl08x->slave.device_resume = pl08x_resume;
         pl08x->slave.device_terminate_all = pl08x_terminate_all;
+        pl08x->slave.device_synchronize = pl08x_synchronize;
         pl08x->slave.src_addr_widths = PL80X_DMA_BUSWIDTHS;
         pl08x->slave.dst_addr_widths = PL80X_DMA_BUSWIDTHS;
         pl08x->slave.directions =

drivers/dma/bcm2835-dma.c

@@ -812,7 +812,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
          * c->desc is NULL and exit.)
          */
         if (c->desc) {
-                bcm2835_dma_desc_free(&c->desc->vd);
+                vchan_terminate_vdesc(&c->desc->vd);
                 c->desc = NULL;
                 bcm2835_dma_abort(c->chan_base);
 
@@ -836,6 +836,13 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
         return 0;
 }
 
+static void bcm2835_dma_synchronize(struct dma_chan *chan)
+{
+        struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+        vchan_synchronize(&c->vc);
+}
+
 static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id,
         int irq, unsigned int irq_flags)
 {
@@ -942,6 +949,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
         od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
         od->ddev.device_config = bcm2835_dma_slave_config;
         od->ddev.device_terminate_all = bcm2835_dma_terminate_all;
+        od->ddev.device_synchronize = bcm2835_dma_synchronize;
         od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
         od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
         od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |

drivers/dma/dma-jz4780.c

@@ -511,7 +511,7 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
         /* Clear the DMA status and stop the transfer. */
         jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
         if (jzchan->desc) {
-                jz4780_dma_desc_free(&jzchan->desc->vdesc);
+                vchan_terminate_vdesc(&jzchan->desc->vdesc);
                 jzchan->desc = NULL;
         }
 
@@ -523,6 +523,13 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
         return 0;
 }
 
+static void jz4780_dma_synchronize(struct dma_chan *chan)
+{
+        struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+
+        vchan_synchronize(&jzchan->vchan);
+}
+
 static int jz4780_dma_config(struct dma_chan *chan,
         struct dma_slave_config *config)
 {
@@ -813,6 +820,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
         dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
         dd->device_config = jz4780_dma_config;
         dd->device_terminate_all = jz4780_dma_terminate_all;
+        dd->device_synchronize = jz4780_dma_synchronize;
         dd->device_tx_status = jz4780_dma_tx_status;
         dd->device_issue_pending = jz4780_dma_issue_pending;
         dd->src_addr_widths = JZ_DMA_BUSWIDTHS;

drivers/dma/edma.c

@@ -860,11 +860,8 @@ static int edma_terminate_all(struct dma_chan *chan)
                 /* Move the cyclic channel back to default queue */
                 if (!echan->tc && echan->edesc->cyclic)
                         edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
-                /*
-                 * free the running request descriptor
-                 * since it is not in any of the vdesc lists
-                 */
-                edma_desc_free(&echan->edesc->vdesc);
+
+                vchan_terminate_vdesc(&echan->edesc->vdesc);
                 echan->edesc = NULL;
         }
 

drivers/dma/img-mdc-dma.c

@@ -694,7 +694,6 @@ static unsigned int mdc_get_new_events(struct mdc_chan *mchan)
 static int mdc_terminate_all(struct dma_chan *chan)
 {
         struct mdc_chan *mchan = to_mdc_chan(chan);
-        struct mdc_tx_desc *mdesc;
         unsigned long flags;
         LIST_HEAD(head);
 
@@ -703,21 +702,28 @@ static int mdc_terminate_all(struct dma_chan *chan)
         mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL,
                         MDC_CONTROL_AND_STATUS);
 
-        mdesc = mchan->desc;
-        mchan->desc = NULL;
+        if (mchan->desc) {
+                vchan_terminate_vdesc(&mchan->desc->vd);
+                mchan->desc = NULL;
+        }
         vchan_get_all_descriptors(&mchan->vc, &head);
 
         mdc_get_new_events(mchan);
 
         spin_unlock_irqrestore(&mchan->vc.lock, flags);
 
-        if (mdesc)
-                mdc_desc_free(&mdesc->vd);
         vchan_dma_desc_free_list(&mchan->vc, &head);
 
         return 0;
 }
 
+static void mdc_synchronize(struct dma_chan *chan)
+{
+        struct mdc_chan *mchan = to_mdc_chan(chan);
+
+        vchan_synchronize(&mchan->vc);
+}
+
 static int mdc_slave_config(struct dma_chan *chan,
                             struct dma_slave_config *config)
 {
@@ -952,6 +958,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
         mdma->dma_dev.device_tx_status = mdc_tx_status;
         mdma->dma_dev.device_issue_pending = mdc_issue_pending;
         mdma->dma_dev.device_terminate_all = mdc_terminate_all;
+        mdma->dma_dev.device_synchronize = mdc_synchronize;
         mdma->dma_dev.device_config = mdc_slave_config;
 
         mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

drivers/dma/k3dma.c

@@ -719,7 +719,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
                 c->phy = NULL;
                 p->vchan = NULL;
                 if (p->ds_run) {
-                        k3_dma_free_desc(&p->ds_run->vd);
+                        vchan_terminate_vdesc(&p->ds_run->vd);
                         p->ds_run = NULL;
                 }
                 p->ds_done = NULL;
@@ -730,6 +730,13 @@ static int k3_dma_terminate_all(struct dma_chan *chan)
         return 0;
 }
 
+static void k3_dma_synchronize(struct dma_chan *chan)
+{
+        struct k3_dma_chan *c = to_k3_chan(chan);
+
+        vchan_synchronize(&c->vc);
+}
+
 static int k3_dma_transfer_pause(struct dma_chan *chan)
 {
         struct k3_dma_chan *c = to_k3_chan(chan);
@@ -868,6 +875,7 @@ static int k3_dma_probe(struct platform_device *op)
         d->slave.device_pause = k3_dma_transfer_pause;
         d->slave.device_resume = k3_dma_transfer_resume;
         d->slave.device_terminate_all = k3_dma_terminate_all;
+        d->slave.device_synchronize = k3_dma_synchronize;
         d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;
 
         /* init virtual channel */

drivers/dma/omap-dma.c

@@ -1311,7 +1311,7 @@ static int omap_dma_terminate_all(struct dma_chan *chan)
          * c->desc is NULL and exit.)
          */
         if (c->desc) {
-                omap_dma_desc_free(&c->desc->vd);
+                vchan_terminate_vdesc(&c->desc->vd);
                 c->desc = NULL;
                 /* Avoid stopping the dma twice */
                 if (!c->paused)

drivers/dma/s3c24xx-dma.c

@@ -732,7 +732,7 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
 
         /* Dequeue current job */
         if (s3cchan->at) {
-                s3c24xx_dma_desc_free(&s3cchan->at->vd);
+                vchan_terminate_vdesc(&s3cchan->at->vd);
                 s3cchan->at = NULL;
         }
 
@@ -744,6 +744,13 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
         return ret;
 }
 
+static void s3c24xx_dma_synchronize(struct dma_chan *chan)
+{
+        struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
+
+        vchan_synchronize(&s3cchan->vc);
+}
+
 static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
 {
         /* Ensure all queued descriptors are freed */
@@ -1282,6 +1289,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
         s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
         s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
         s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
+        s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
 
         /* Initialize slave engine for SoC internal dedicated peripherals */
         dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
@@ -1296,6 +1304,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
         s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
         s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
         s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
+        s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
         s3cdma->slave.filter.map = pdata->slave_map;
         s3cdma->slave.filter.mapcnt = pdata->slavecnt;
         s3cdma->slave.filter.fn = s3c24xx_dma_filter;

drivers/dma/virt-dma.c

@@ -107,10 +107,7 @@ static void vchan_complete(unsigned long arg)
                 dmaengine_desc_get_callback(&vd->tx, &cb);
 
                 list_del(&vd->node);
-                if (dmaengine_desc_test_reuse(&vd->tx))
-                        list_add(&vd->node, &vc->desc_allocated);
-                else
-                        vc->desc_free(vd);
+                vchan_vdesc_fini(vd);
 
                 dmaengine_desc_callback_invoke(&cb, NULL);
         }

drivers/dma/virt-dma.h

@@ -35,6 +35,7 @@ struct virt_dma_chan {
         struct list_head desc_completed;
 
         struct virt_dma_desc *cyclic;
+        struct virt_dma_desc *vd_terminated;
 };
 
 static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
@@ -103,6 +104,20 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
         tasklet_schedule(&vc->task);
 }
 
+/**
+ * vchan_vdesc_fini - Free or reuse a descriptor
+ * @vd: virtual descriptor to free/reuse
+ */
+static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
+{
+        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+        if (dmaengine_desc_test_reuse(&vd->tx))
+                list_add(&vd->node, &vc->desc_allocated);
+        else
+                vc->desc_free(vd);
+}
+
 /**
  * vchan_cyclic_callback - report the completion of a period
  * @vd: virtual descriptor
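
For context, a minimal sketch of how the reuse branch of vchan_vdesc_fini() is
reached (illustrative, not part of this commit; foo_submit_reusable() and its
parameters are hypothetical, the dmaengine calls are real):

#include <linux/dmaengine.h>

/* Submit a slave transfer whose descriptor survives completion. */
static int foo_submit_reusable(struct dma_chan *chan, dma_addr_t buf,
                               size_t len)
{
        struct dma_async_tx_descriptor *tx;

        tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
                                         DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        /*
         * Mark the descriptor reusable (returns -EPERM if the channel
         * does not support reuse).  On completion vchan_complete()
         * calls vchan_vdesc_fini(), which then parks the descriptor on
         * vc->desc_allocated instead of invoking vc->desc_free().
         */
        if (dmaengine_desc_set_reuse(tx))
                pr_warn("descriptor reuse not supported\n");

        dmaengine_submit(tx);
        dma_async_issue_pending(chan);
        return 0;
}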
@@ -115,6 +130,25 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
         tasklet_schedule(&vc->task);
 }
 
+/**
+ * vchan_terminate_vdesc - Disable pending cyclic callback
+ * @vd: virtual descriptor to be terminated
+ *
+ * vc.lock must be held by caller
+ */
+static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
+{
+        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+
+        /* free up stuck descriptor */
+        if (vc->vd_terminated)
+                vchan_vdesc_fini(vc->vd_terminated);
+
+        vc->vd_terminated = vd;
+        if (vc->cyclic == vd)
+                vc->cyclic = NULL;
+}
+
 /**
  * vchan_next_desc - peek at the next descriptor to be processed
  * @vc: virtual channel to obtain descriptor from
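
The driver hunks above all make the same conversion. A minimal sketch of the
resulting terminate_all/synchronize pattern for a virt-dma based driver (the
foo_* names are hypothetical, standing in for pl08x, bcm2835 and the rest):

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include "virt-dma.h"

struct foo_dma_desc {
        struct virt_dma_desc vd;        /* embedded virt-dma descriptor */
};

struct foo_dma_chan {
        struct virt_dma_chan vc;        /* embedded virt-dma channel */
        struct foo_dma_desc *desc;      /* descriptor currently on the HW */
};

static inline struct foo_dma_chan *to_foo_dma_chan(struct dma_chan *chan)
{
        return container_of(chan, struct foo_dma_chan, vc.chan);
}

static int foo_dma_terminate_all(struct dma_chan *chan)
{
        struct foo_dma_chan *c = to_foo_dma_chan(chan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&c->vc.lock, flags);
        /* (stop the hardware channel here) */
        if (c->desc) {
                /*
                 * Defer the free: the descriptor is parked in
                 * vc->vd_terminated and released later, from
                 * vchan_synchronize() or by the next terminate, so a
                 * callback still running in the completion tasklet
                 * cannot touch freed memory.
                 */
                vchan_terminate_vdesc(&c->desc->vd);
                c->desc = NULL;
        }
        vchan_get_all_descriptors(&c->vc, &head);
        spin_unlock_irqrestore(&c->vc.lock, flags);

        vchan_dma_desc_free_list(&c->vc, &head);
        return 0;
}

static void foo_dma_synchronize(struct dma_chan *chan)
{
        struct foo_dma_chan *c = to_foo_dma_chan(chan);

        vchan_synchronize(&c->vc);      /* also frees vc->vd_terminated */
}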
@@ -168,10 +202,20 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
  * Makes sure that all scheduled or active callbacks have finished running. For
  * proper operation the caller has to ensure that no new callbacks are scheduled
  * after the invocation of this function started.
+ * Free up the terminated cyclic descriptor to prevent memory leakage.
  */
 static inline void vchan_synchronize(struct virt_dma_chan *vc)
 {
+        unsigned long flags;
+
         tasklet_kill(&vc->task);
+
+        spin_lock_irqsave(&vc->lock, flags);
+        if (vc->vd_terminated) {
+                vchan_vdesc_fini(vc->vd_terminated);
+                vc->vd_terminated = NULL;
+        }
+        spin_unlock_irqrestore(&vc->lock, flags);
 }
 
 #endif
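
From the client side, the pairing this machinery supports is the documented
dmaengine termination sequence: terminate asynchronously, then synchronize
before freeing memory the callbacks may still touch. A short sketch, assuming
chan is a channel served by a driver like the hypothetical one above:

        dmaengine_terminate_async(chan);  /* driver parks the running vdesc */
        /* callbacks may still be running at this point */
        dmaengine_synchronize(chan);      /* vchan_synchronize(): tasklet is
                                             killed, parked vdesc is freed */
        /* now it is safe to free DMA buffers and callback data */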