media: ti-vpe: cal: Set cal_dmaqueue.pending to NULL when no pending buffer

When a pending buffer becomes active, the cal_dmaqueue.active field is
updated, but the pending field keeps the same value until a new buffer
becomes pending. This requires handling the special case of
pending == active in different places. Simplify the code by setting the
pending field to NULL when the pending buffer becomes active. Buffers
are now simply moved from queue to pending and from pending to active.

Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Benoit Parrot <bparrot@ti.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
Author:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Date:      2020-12-07 00:53:50 +01:00
Committer: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
Commit:    2e2279b53a
Parent:    ca4fec54f6
2 changed files with 11 additions and 9 deletions
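
For orientation before the diff, here is a minimal sketch of the hand-off the commit message describes; it is not code from the driver. A buffer moves from dma.queue to dma.pending when it is programmed to the hardware, and from dma.pending to dma.active when the hardware starts writing it, with pending reset to NULL at that point. The struct cal_ctx and struct cal_buffer types and the queue/pending/active fields of struct cal_dmaqueue are the driver's own; the example_* helper names are purely illustrative, and the caller is assumed to hold ctx->dma.lock.

/* Illustrative sketch only -- not code from the driver; the caller is
 * assumed to hold ctx->dma.lock, as the real start/stop and IRQ paths do.
 */

/* queue -> pending: pick the next buffer to program to the hardware. */
static struct cal_buffer *example_queue_next(struct cal_ctx *ctx)
{
        struct cal_buffer *buf;

        if (ctx->dma.pending || list_empty(&ctx->dma.queue))
                return NULL;

        buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
        list_del(&buf->list);
        ctx->dma.pending = buf;

        return buf;
}

/* pending -> active: the hardware switched buffers. Returns the buffer
 * that just completed (the previous active one), or NULL if nothing was
 * pending. The pending field is always NULL afterwards.
 */
static struct cal_buffer *example_promote_pending(struct cal_ctx *ctx)
{
        struct cal_buffer *done = NULL;

        if (ctx->dma.pending) {
                done = ctx->dma.active;
                ctx->dma.active = ctx->dma.pending;
                ctx->dma.pending = NULL;
        }

        return done;
}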

drivers/media/platform/ti-vpe/cal-video.c

@@ -491,12 +491,15 @@ static void cal_release_buffers(struct cal_ctx *ctx,
                 vb2_buffer_done(&buf->vb.vb2_buf, state);
         }
 
-        if (ctx->dma.pending != ctx->dma.active)
+        if (ctx->dma.pending) {
                 vb2_buffer_done(&ctx->dma.pending->vb.vb2_buf, state);
-        vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
-        ctx->dma.active = NULL;
+                ctx->dma.pending = NULL;
+        }
+
+        if (ctx->dma.active) {
+                vb2_buffer_done(&ctx->dma.active->vb.vb2_buf, state);
+                ctx->dma.active = NULL;
+        }
 
         spin_unlock_irq(&ctx->dma.lock);
 }
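
With pending cleared as soon as it is consumed, the release path above can treat the two slots symmetrically: each one is completed only if it still holds a buffer. A sketch of that pattern with a hypothetical helper (the driver open-codes the two if blocks instead):

/*
 * Illustrative only: complete whatever buffer a slot still holds and
 * clear it. Called for &ctx->dma.pending and &ctx->dma.active in turn,
 * this is equivalent to the two if blocks in the hunk above.
 */
static void example_release_slot(struct cal_buffer **slot,
                                 enum vb2_buffer_state state)
{
        if (*slot) {
                vb2_buffer_done(&(*slot)->vb.vb2_buf, state);
                *slot = NULL;
        }
}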

@@ -510,12 +513,11 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
         spin_lock_irq(&ctx->dma.lock);
         buf = list_first_entry(&ctx->dma.queue, struct cal_buffer, list);
         ctx->dma.active = buf;
-        ctx->dma.pending = buf;
         list_del(&buf->list);
         spin_unlock_irq(&ctx->dma.lock);
 
-        addr = vb2_dma_contig_plane_dma_addr(&ctx->dma.active->vb.vb2_buf, 0);
+        addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
 
         ctx->sequence = 0;
         ctx->dma.state = CAL_DMA_RUNNING;

drivers/media/platform/ti-vpe/cal.c

@@ -485,8 +485,7 @@ static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
                  */
                 cal_ctx_wr_dma_disable(ctx);
                 ctx->dma.state = CAL_DMA_STOP_PENDING;
-        } else if (!list_empty(&ctx->dma.queue) &&
-                   ctx->dma.active == ctx->dma.pending) {
+        } else if (!list_empty(&ctx->dma.queue) && !ctx->dma.pending) {
                 /*
                  * Otherwise, if a new buffer is available, queue it to the
                  * hardware.

@@ -519,9 +518,10 @@ static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
         }
 
         /* If a new buffer was queued, complete the current buffer. */
-        if (ctx->dma.active != ctx->dma.pending) {
+        if (ctx->dma.pending) {
                 buf = ctx->dma.active;
                 ctx->dma.active = ctx->dma.pending;
+                ctx->dma.pending = NULL;
         }
 
         spin_unlock(&ctx->dma.lock);
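
Taken together, the two cal.c hunks keep dma.pending non-NULL only while a newly programmed buffer has not yet been taken over by the hardware. Below is a rough sketch of one frame cycle using the illustrative helpers from the first sketch; the real handlers also program the DMA address, fill in sequence and timestamp, and handle the stop states, all omitted here.

/* Illustrative frame cycle, loosely following cal_irq_wdma_start() and
 * cal_irq_wdma_end(); not the actual interrupt handlers.
 */
static void example_frame_cycle(struct cal_ctx *ctx)
{
        struct cal_buffer *buf;

        /* Frame start: refill pending only when it is empty. */
        spin_lock(&ctx->dma.lock);
        example_queue_next(ctx);                /* queue -> pending, if a buffer waits */
        spin_unlock(&ctx->dma.lock);

        /* Frame end: promote pending and pick up the finished buffer. */
        spin_lock(&ctx->dma.lock);
        buf = example_promote_pending(ctx);     /* pending -> active, pending = NULL */
        spin_unlock(&ctx->dma.lock);

        if (buf)
                vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}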