/*
 * Samsung S5P Multi Format Codec v 5.1
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Kamil Debski, <k.debski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/v4l2-event.h>
#include <linux/workqueue.h>
#include <linux/of.h>
#include <media/videobuf2-core.h>
#include "s5p_mfc_common.h"
#include "s5p_mfc_ctrl.h"
#include "s5p_mfc_debug.h"
#include "s5p_mfc_dec.h"
#include "s5p_mfc_enc.h"
#include "s5p_mfc_intr.h"
#include "s5p_mfc_opr.h"
#include "s5p_mfc_cmd.h"
#include "s5p_mfc_pm.h"

#define S5P_MFC_NAME		"s5p-mfc"
#define S5P_MFC_DEC_NAME	"s5p-mfc-dec"
#define S5P_MFC_ENC_NAME	"s5p-mfc-enc"

int mfc_debug_level;
module_param_named(debug, mfc_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
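
/*
 * Usage sketch (not part of the original file): because the parameter above
 * is declared with S_IRUGO | S_IWUSR, the debug level can also be changed at
 * runtime, e.g. "echo 1 > /sys/module/s5p_mfc/parameters/debug", assuming the
 * driver is built as the s5p-mfc module. Higher values enable progressively
 * more verbose mfc_debug() output.
 */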

/* Helper functions for interrupt processing */

/* Remove from hw execution round robin */
void clear_work_bit(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	spin_lock(&dev->condlock);
	__clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock(&dev->condlock);
}

/* Add to hw execution round robin */
void set_work_bit(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	spin_lock(&dev->condlock);
	__set_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock(&dev->condlock);
}

/* Remove from hw execution round robin */
void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->condlock, flags);
	__clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
}

/* Add to hw execution round robin */
void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->condlock, flags);
	__set_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
}
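
/*
 * Added note, a rough sketch of how the four helpers above are used:
 * dev->ctx_work_bits (guarded by dev->condlock) carries one bit per context
 * that has work pending for the hardware, and the try_run op consults this
 * mask when picking the next context to schedule. The plain spin_lock
 * variants are called from the interrupt path in this file, while the
 * *_irqsave variants are used from process context (open/release).
 */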

/* Wake up context wait_queue */
static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
			unsigned int err)
{
	ctx->int_cond = 1;
	ctx->int_type = reason;
	ctx->int_err = err;
	wake_up(&ctx->queue);
}

/* Wake up device wait_queue */
static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
			unsigned int err)
{
	dev->int_cond = 1;
	dev->int_type = reason;
	dev->int_err = err;
	wake_up(&dev->queue);
}

static void s5p_mfc_watchdog(unsigned long arg)
{
	struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;

	if (test_bit(0, &dev->hw_lock))
		atomic_inc(&dev->watchdog_cnt);
	if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
		/* This means that hw is busy and no interrupts were
		 * generated by hw for the Nth time of running this
		 * watchdog timer. This usually means a serious hw
		 * error. Now it is time to kill all instances and
		 * reset the MFC. */
		mfc_err("Time out during waiting for HW\n");
		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
	}
	dev->watchdog_timer.expires = jiffies +
					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
	add_timer(&dev->watchdog_timer);
}
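
/*
 * Added note on the watchdog flow, roughly: the timer above re-arms itself
 * every MFC_WATCHDOG_INTERVAL ms and increments watchdog_cnt only while the
 * hardware lock (bit 0 of dev->hw_lock) is held, i.e. while an operation is
 * outstanding. Every interrupt resets the counter (see s5p_mfc_irq()), so the
 * worker below only runs after the hardware has been silent for
 * MFC_WATCHDOG_CNT consecutive ticks; it then marks all instances as being
 * in error and reloads the firmware.
 */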

static void s5p_mfc_watchdog_worker(struct work_struct *work)
{
	struct s5p_mfc_dev *dev;
	struct s5p_mfc_ctx *ctx;
	unsigned long flags;
	int mutex_locked;
	int i, ret;

	dev = container_of(work, struct s5p_mfc_dev, watchdog_work);

	mfc_err("Driver timeout error handling\n");
	/* Lock the mutex that protects open and release.
	 * This is necessary as they may load and unload firmware. */
	mutex_locked = mutex_trylock(&dev->mfc_mutex);
	if (!mutex_locked)
		mfc_err("Error: some instance may be closing/opening\n");
	spin_lock_irqsave(&dev->irqlock, flags);

	s5p_mfc_clock_off();

	for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
		ctx = dev->ctx[i];
		if (!ctx)
			continue;
		ctx->state = MFCINST_ERROR;
		s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
						&ctx->dst_queue, &ctx->vq_dst);
		s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
						&ctx->src_queue, &ctx->vq_src);
		clear_work_bit(ctx);
		wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0);
	}
	clear_bit(0, &dev->hw_lock);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Double check if there is at least one instance running.
	 * If no instance is in memory then no firmware should be present. */
	if (dev->num_inst > 0) {
		ret = s5p_mfc_load_firmware(dev);
		if (ret) {
			mfc_err("Failed to reload FW\n");
			goto unlock;
		}
		s5p_mfc_clock_on();
		ret = s5p_mfc_init_hw(dev);
		if (ret)
			mfc_err("Failed to reinit FW\n");
	}
unlock:
	if (mutex_locked)
		mutex_unlock(&dev->mfc_mutex);
}

static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
}

static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_buf *dst_buf;
	struct s5p_mfc_dev *dev = ctx->dev;

	ctx->state = MFCINST_FINISHED;
	ctx->sequence++;
	while (!list_empty(&ctx->dst_queue)) {
		dst_buf = list_entry(ctx->dst_queue.next,
				     struct s5p_mfc_buf, list);
		mfc_debug(2, "Cleaning up buffer: %d\n",
					  dst_buf->b->v4l2_buf.index);
		vb2_set_plane_payload(dst_buf->b, 0, 0);
		vb2_set_plane_payload(dst_buf->b, 1, 0);
		list_del(&dst_buf->list);
		ctx->dst_queue_cnt--;
		dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);

		if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) ==
			s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx))
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
		else
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;

		ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
		vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
	}
}

static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *dst_buf, *src_buf;
	size_t dec_y_addr;
	unsigned int frame_type;

	dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
	frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);

	/* Copy timestamp / timecode from decoded src to dst and set
	   appropriate flags */
	src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
			dst_buf->b->v4l2_buf.timecode =
						src_buf->b->v4l2_buf.timecode;
			dst_buf->b->v4l2_buf.timestamp =
						src_buf->b->v4l2_buf.timestamp;
			dst_buf->b->v4l2_buf.flags &=
				~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
			dst_buf->b->v4l2_buf.flags |=
				src_buf->b->v4l2_buf.flags
				& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
			switch (frame_type) {
			case S5P_FIMV_DECODE_FRAME_I_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_KEYFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_P_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_PFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_B_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_BFRAME;
				break;
			}
			break;
		}
	}
}

static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *dst_buf;
	size_t dspl_y_addr;
	unsigned int frame_type;

	dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
	if (IS_MFCV6_PLUS(dev))
		frame_type = s5p_mfc_hw_call(dev->mfc_ops,
			get_disp_frame_type, ctx);
	else
		frame_type = s5p_mfc_hw_call(dev->mfc_ops,
			get_dec_frame_type, dev);

	/* If frame is same as previous then skip and do not dequeue */
	if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
		if (!ctx->after_packed_pb)
			ctx->sequence++;
		ctx->after_packed_pb = 0;
		return;
	}
	ctx->sequence++;
	/* The MFC returns the address of the buffer, now we have to
	 * check which videobuf it corresponds to */
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		/* Check if this is the buffer we're looking for */
		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
			list_del(&dst_buf->list);
			ctx->dst_queue_cnt--;
			dst_buf->b->v4l2_buf.sequence = ctx->sequence;
			if (s5p_mfc_hw_call(dev->mfc_ops,
					get_pic_type_top, ctx) ==
				s5p_mfc_hw_call(dev->mfc_ops,
					get_pic_type_bot, ctx))
				dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
			else
				dst_buf->b->v4l2_buf.field =
							V4L2_FIELD_INTERLACED;
			vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
			vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
			clear_bit(dst_buf->b->v4l2_buf.index,
							&ctx->dec_dst_flag);

			vb2_buffer_done(dst_buf->b,
				err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);

			break;
		}
	}
}

/* Handle frame decoding interrupt */
static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
					unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int dst_frame_status;
	unsigned int dec_frame_status;
	struct s5p_mfc_buf *src_buf;
	unsigned long flags;
	unsigned int res_change;

	dst_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
				& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
	dec_frame_status = s5p_mfc_hw_call(dev->mfc_ops, get_dec_status, dev)
				& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
	res_change = (s5p_mfc_hw_call(dev->mfc_ops, get_dspl_status, dev)
				& S5P_FIMV_DEC_STATUS_RESOLUTION_MASK)
				>> S5P_FIMV_DEC_STATUS_RESOLUTION_SHIFT;
	mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
	if (ctx->state == MFCINST_RES_CHANGE_INIT)
		ctx->state = MFCINST_RES_CHANGE_FLUSH;
	if (res_change == S5P_FIMV_RES_INCREASE ||
		res_change == S5P_FIMV_RES_DECREASE) {
		ctx->state = MFCINST_RES_CHANGE_INIT;
		s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
		wake_up_ctx(ctx, reason, err);
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		s5p_mfc_clock_off();
		s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
		return;
	}
	if (ctx->dpb_flush_flag)
		ctx->dpb_flush_flag = 0;

	spin_lock_irqsave(&dev->irqlock, flags);
	/* All frames remaining in the buffer have been extracted  */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
		if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
			static const struct v4l2_event ev_src_ch = {
				.type = V4L2_EVENT_SOURCE_CHANGE,
				.u.src_change.changes =
					V4L2_EVENT_SRC_CH_RESOLUTION,
			};

			s5p_mfc_handle_frame_all_extracted(ctx);
			ctx->state = MFCINST_RES_CHANGE_END;
			v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);

			goto leave_handle_frame;
		} else {
			s5p_mfc_handle_frame_all_extracted(ctx);
		}
	}

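	/*
	 * Note (added): the decision to copy timestamps is intentionally keyed
	 * off the *decode* status of the source buffer, not the display status
	 * of the destination buffer; "decode and display" means the current
	 * bitstream buffer actually produced an output frame. Firmware v7 and
	 * later no longer marks undecoded frames as "skipped", so using the
	 * display status here would overwrite timestamps of previously decoded
	 * buffers.
	 */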
	if (dec_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY)
		s5p_mfc_handle_frame_copy_time(ctx);

	/* A frame has been decoded and is in the buffer  */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
	    dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
		s5p_mfc_handle_frame_new(ctx, err);
	} else {
		mfc_debug(2, "No frame decode\n");
	}
	/* Mark source buffer as complete */
	if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
		&& !list_empty(&ctx->src_queue)) {
		src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
								list);
		ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops,
						get_consumed_stream, dev);
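		/*
		 * Added note: H.264 and VP8 bitstream buffers are never
		 * re-submitted for another decode run. The consumed-stream
		 * size reported by the firmware is not reliable for them (VP8
		 * padding bytes are not counted, and v7 firmware simply echoes
		 * back the size it was given), so user space is expected to
		 * queue one frame per buffer for these codecs.
		 */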
		if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC &&
			ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC &&
			ctx->consumed_stream + STUFF_BYTE <
			src_buf->b->v4l2_planes[0].bytesused) {
			/* Run MFC again on the same buffer */
			mfc_debug(2, "Running again the same buffer\n");
			ctx->after_packed_pb = 1;
		} else {
			mfc_debug(2, "MFC needs next buffer\n");
			ctx->consumed_stream = 0;
			if (src_buf->flags & MFC_BUF_FLAG_EOS)
				ctx->state = MFCINST_FINISHING;
			list_del(&src_buf->list);
			ctx->src_queue_cnt--;
			if (s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) > 0)
				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
			else
				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
		}
	}
leave_handle_frame:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
				    || ctx->dst_queue_cnt < ctx->pb_count)
		clear_work_bit(ctx);
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	wake_up_ctx(ctx, reason, err);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_clock_off();
	/* if suspending, wake up device and do not try_run again */
	if (test_bit(0, &dev->enter_suspend))
		wake_up_dev(dev, reason, err);
	else
		s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
}

/* Error handling for interrupt */
static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
		struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err)
{
	unsigned long flags;

	mfc_err("Interrupt Error: %08x\n", err);

	if (ctx != NULL) {
		/* Error recovery is dependent on the state of context */
		switch (ctx->state) {
		case MFCINST_RES_CHANGE_INIT:
		case MFCINST_RES_CHANGE_FLUSH:
		case MFCINST_RES_CHANGE_END:
		case MFCINST_FINISHING:
		case MFCINST_FINISHED:
		case MFCINST_RUNNING:
			/* It is highly probable that an error occurred
			 * while decoding a frame */
			clear_work_bit(ctx);
			ctx->state = MFCINST_ERROR;
			/* Mark all dst buffers as having an error */
			spin_lock_irqsave(&dev->irqlock, flags);
			s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
						&ctx->dst_queue, &ctx->vq_dst);
			/* Mark all src buffers as having an error */
			s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue,
						&ctx->src_queue, &ctx->vq_src);
			spin_unlock_irqrestore(&dev->irqlock, flags);
			wake_up_ctx(ctx, reason, err);
			break;
		default:
			clear_work_bit(ctx);
			ctx->state = MFCINST_ERROR;
			wake_up_ctx(ctx, reason, err);
			break;
		}
	}
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	s5p_mfc_clock_off();
	wake_up_dev(dev, reason, err);
	return;
}

/* Header parsing interrupt handling */
static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev;

	if (ctx == NULL)
		return;
	dev = ctx->dev;
	if (ctx->c_ops->post_seq_start) {
		if (ctx->c_ops->post_seq_start(ctx))
			mfc_err("post_seq_start() failed\n");
	} else {
		ctx->img_width = s5p_mfc_hw_call(dev->mfc_ops, get_img_width,
				dev);
		ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height,
				dev);

		s5p_mfc_hw_call_void(dev->mfc_ops, dec_calc_dpb_size, ctx);

		ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
				dev);
		ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
				dev);
		if (ctx->img_width == 0 || ctx->img_height == 0)
			ctx->state = MFCINST_ERROR;
		else
			ctx->state = MFCINST_HEAD_PARSED;

		if ((ctx->codec_mode == S5P_MFC_CODEC_H264_DEC ||
			ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) &&
				!list_empty(&ctx->src_queue)) {
			struct s5p_mfc_buf *src_buf;
			src_buf = list_entry(ctx->src_queue.next,
					struct s5p_mfc_buf, list);
			if (s5p_mfc_hw_call(dev->mfc_ops, get_consumed_stream,
						dev) <
					src_buf->b->v4l2_planes[0].bytesused)
				ctx->head_processed = 0;
			else
				ctx->head_processed = 1;
		} else {
			ctx->head_processed = 1;
		}
	}
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	clear_work_bit(ctx);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_clock_off();
	s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
	wake_up_ctx(ctx, reason, err);
}

/* Buffer initialization (INIT_BUFFERS) interrupt handling */
static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_buf *src_buf;
	struct s5p_mfc_dev *dev;
	unsigned long flags;

	if (ctx == NULL)
		return;
	dev = ctx->dev;
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->int_cond = 1;
	clear_work_bit(ctx);
	if (err == 0) {
		ctx->state = MFCINST_RUNNING;
		if (!ctx->dpb_flush_flag && ctx->head_processed) {
			spin_lock_irqsave(&dev->irqlock, flags);
			if (!list_empty(&ctx->src_queue)) {
				src_buf = list_entry(ctx->src_queue.next,
					     struct s5p_mfc_buf, list);
				list_del(&src_buf->list);
				ctx->src_queue_cnt--;
				vb2_buffer_done(src_buf->b,
						VB2_BUF_STATE_DONE);
			}
			spin_unlock_irqrestore(&dev->irqlock, flags);
		} else {
			ctx->dpb_flush_flag = 0;
		}
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();

		s5p_mfc_clock_off();

		wake_up(&ctx->queue);
		s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
	} else {
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();

		s5p_mfc_clock_off();

		wake_up(&ctx->queue);
	}
}

static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *mb_entry;

	mfc_debug(2, "Stream completed\n");

	s5p_mfc_clear_int_flags(dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->state = MFCINST_FINISHED;

	spin_lock(&dev->irqlock);
	if (!list_empty(&ctx->dst_queue)) {
		mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
									list);
		list_del(&mb_entry->list);
		ctx->dst_queue_cnt--;
		vb2_set_plane_payload(mb_entry->b, 0, 0);
		vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
	}
	spin_unlock(&dev->irqlock);

	clear_work_bit(ctx);

	WARN_ON(test_and_clear_bit(0, &dev->hw_lock) == 0);

	s5p_mfc_clock_off();
	wake_up(&ctx->queue);
	s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
}
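
/*
 * Added note: the handler below is the single place where replies from the
 * MFC firmware (the R2H commands) are dispatched to the helpers above. They
 * all follow the same rough contract: clear the interrupt flags, release
 * bit 0 of dev->hw_lock (taken when the operation was issued), gate the
 * clock off and finally kick try_run so the next queued context can be
 * scheduled.
 */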

/* Interrupt processing */
static irqreturn_t s5p_mfc_irq(int irq, void *priv)
{
	struct s5p_mfc_dev *dev = priv;
	struct s5p_mfc_ctx *ctx;
	unsigned int reason;
	unsigned int err;

	mfc_debug_enter();
	/* Reset the timeout watchdog */
	atomic_set(&dev->watchdog_cnt, 0);
	ctx = dev->ctx[dev->curr_ctx];
	/* Get the reason of interrupt and the error code */
	reason = s5p_mfc_hw_call(dev->mfc_ops, get_int_reason, dev);
	err = s5p_mfc_hw_call(dev->mfc_ops, get_int_err, dev);
	mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
	switch (reason) {
	case S5P_MFC_R2H_CMD_ERR_RET:
		/* An error has occurred */
		if (ctx->state == MFCINST_RUNNING &&
			s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
				dev->warn_start)
			s5p_mfc_handle_frame(ctx, reason, err);
		else
			s5p_mfc_handle_error(dev, ctx, reason, err);
		clear_bit(0, &dev->enter_suspend);
		break;

	case S5P_MFC_R2H_CMD_SLICE_DONE_RET:
	case S5P_MFC_R2H_CMD_FIELD_DONE_RET:
	case S5P_MFC_R2H_CMD_FRAME_DONE_RET:
		if (ctx->c_ops->post_frame_start) {
			if (ctx->c_ops->post_frame_start(ctx))
				mfc_err("post_frame_start() failed\n");
			s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
			wake_up_ctx(ctx, reason, err);
			if (test_and_clear_bit(0, &dev->hw_lock) == 0)
				BUG();
			s5p_mfc_clock_off();
			s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
		} else {
			s5p_mfc_handle_frame(ctx, reason, err);
		}
		break;

	case S5P_MFC_R2H_CMD_SEQ_DONE_RET:
		s5p_mfc_handle_seq_done(ctx, reason, err);
		break;

	case S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET:
		ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev);
		ctx->state = MFCINST_GOT_INST;
		clear_work_bit(ctx);
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	case S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET:
		clear_work_bit(ctx);
		ctx->inst_no = MFC_NO_INSTANCE_SET;
		ctx->state = MFCINST_FREE;
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	case S5P_MFC_R2H_CMD_SYS_INIT_RET:
	case S5P_MFC_R2H_CMD_FW_STATUS_RET:
	case S5P_MFC_R2H_CMD_SLEEP_RET:
	case S5P_MFC_R2H_CMD_WAKEUP_RET:
		if (ctx)
			clear_work_bit(ctx);
		s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
		wake_up_dev(dev, reason, err);
		clear_bit(0, &dev->hw_lock);
		clear_bit(0, &dev->enter_suspend);
		break;

	case S5P_MFC_R2H_CMD_INIT_BUFFERS_RET:
		s5p_mfc_handle_init_buffers(ctx, reason, err);
		break;

	case S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET:
		s5p_mfc_handle_stream_complete(ctx, reason, err);
		break;

	case S5P_MFC_R2H_CMD_DPB_FLUSH_RET:
		clear_work_bit(ctx);
		ctx->state = MFCINST_RUNNING;
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	default:
		mfc_debug(2, "Unknown int reason\n");
		s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	}
	mfc_debug_leave();
	return IRQ_HANDLED;
irq_cleanup_hw:
	s5p_mfc_hw_call_void(dev->mfc_ops, clear_int_flags, dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->int_cond = 1;
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		mfc_err("Failed to unlock hw\n");

	s5p_mfc_clock_off();

	s5p_mfc_hw_call_void(dev->mfc_ops, try_run, dev);
	mfc_debug(2, "Exit via irq_cleanup_hw\n");
	return IRQ_HANDLED;
}
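
/*
 * Added note on device lifetime, in short: the first open() powers the IP up,
 * loads the firmware and starts the watchdog timer; the last release()
 * deinitializes the hardware, stops the watchdog and powers the IP back down.
 * Both paths are serialized by dev->mfc_mutex.
 */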

/* Open an MFC node */
static int s5p_mfc_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct s5p_mfc_dev *dev = video_drvdata(file);
	struct s5p_mfc_ctx *ctx = NULL;
	struct vb2_queue *q;
	int ret = 0;

	mfc_debug_enter();
	if (mutex_lock_interruptible(&dev->mfc_mutex))
		return -ERESTARTSYS;
	dev->num_inst++;	/* It is guarded by mfc_mutex in vfd */
	/* Allocate memory for context */
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		mfc_err("Not enough memory\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	v4l2_fh_init(&ctx->fh, vdev);
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);
	ctx->dev = dev;
	INIT_LIST_HEAD(&ctx->src_queue);
	INIT_LIST_HEAD(&ctx->dst_queue);
	ctx->src_queue_cnt = 0;
	ctx->dst_queue_cnt = 0;
	/* Get context number */
	ctx->num = 0;
	while (dev->ctx[ctx->num]) {
		ctx->num++;
		if (ctx->num >= MFC_NUM_CONTEXTS) {
			mfc_err("Too many open contexts\n");
			ret = -EBUSY;
			goto err_no_ctx;
		}
	}
	/* Mark context as idle */
	clear_work_bit_irqsave(ctx);
	dev->ctx[ctx->num] = ctx;
	if (vdev == dev->vfd_dec) {
		ctx->type = MFCINST_DECODER;
		ctx->c_ops = get_dec_codec_ops();
		s5p_mfc_dec_init(ctx);
		/* Setup ctrl handler */
		ret = s5p_mfc_dec_ctrls_setup(ctx);
		if (ret) {
			mfc_err("Failed to setup mfc controls\n");
			goto err_ctrls_setup;
		}
	} else if (vdev == dev->vfd_enc) {
		ctx->type = MFCINST_ENCODER;
		ctx->c_ops = get_enc_codec_ops();
		/* only for encoder */
		INIT_LIST_HEAD(&ctx->ref_queue);
		ctx->ref_queue_cnt = 0;
		s5p_mfc_enc_init(ctx);
		/* Setup ctrl handler */
		ret = s5p_mfc_enc_ctrls_setup(ctx);
		if (ret) {
			mfc_err("Failed to setup mfc controls\n");
			goto err_ctrls_setup;
		}
	} else {
		ret = -ENOENT;
		goto err_bad_node;
	}
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	ctx->inst_no = MFC_NO_INSTANCE_SET;
	/* Load firmware if this is the first instance */
	if (dev->num_inst == 1) {
		dev->watchdog_timer.expires = jiffies +
					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
		add_timer(&dev->watchdog_timer);
		ret = s5p_mfc_power_on();
		if (ret < 0) {
			mfc_err("power on failed\n");
			goto err_pwr_enable;
		}
		s5p_mfc_clock_on();
		ret = s5p_mfc_load_firmware(dev);
		if (ret) {
			s5p_mfc_clock_off();
			goto err_load_fw;
		}
		/* Init the FW */
		ret = s5p_mfc_init_hw(dev);
		s5p_mfc_clock_off();
		if (ret)
			goto err_init_hw;
	}
	/* Init videobuf2 queue for CAPTURE */
	q = &ctx->vq_dst;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	q->drv_priv = &ctx->fh;
	if (vdev == dev->vfd_dec) {
		q->io_modes = VB2_MMAP;
		q->ops = get_dec_queue_ops();
	} else if (vdev == dev->vfd_enc) {
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->ops = get_enc_queue_ops();
	} else {
		ret = -ENOENT;
		goto err_queue_init;
	}
	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	ret = vb2_queue_init(q);
	if (ret) {
		mfc_err("Failed to initialize videobuf2 queue(capture)\n");
		goto err_queue_init;
	}
	/* Init videobuf2 queue for OUTPUT */
	q = &ctx->vq_src;
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	q->io_modes = VB2_MMAP;
	q->drv_priv = &ctx->fh;
	if (vdev == dev->vfd_dec) {
		q->io_modes = VB2_MMAP;
		q->ops = get_dec_queue_ops();
	} else if (vdev == dev->vfd_enc) {
		q->io_modes = VB2_MMAP | VB2_USERPTR;
		q->ops = get_enc_queue_ops();
	} else {
		ret = -ENOENT;
		goto err_queue_init;
	}
	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	ret = vb2_queue_init(q);
	if (ret) {
		mfc_err("Failed to initialize videobuf2 queue(output)\n");
		goto err_queue_init;
	}
	init_waitqueue_head(&ctx->queue);
	mutex_unlock(&dev->mfc_mutex);
	mfc_debug_leave();
	return ret;
	/* Deinit when failure occurred */
err_queue_init:
	if (dev->num_inst == 1)
		s5p_mfc_deinit_hw(dev);
err_init_hw:
err_load_fw:
err_pwr_enable:
	if (dev->num_inst == 1) {
		if (s5p_mfc_power_off() < 0)
			mfc_err("power off failed\n");
		del_timer_sync(&dev->watchdog_timer);
	}
err_ctrls_setup:
	s5p_mfc_dec_ctrls_delete(ctx);
err_bad_node:
	dev->ctx[ctx->num] = NULL;
err_no_ctx:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
err_alloc:
	dev->num_inst--;
	mutex_unlock(&dev->mfc_mutex);
	mfc_debug_leave();
	return ret;
}

/* Release MFC context */
static int s5p_mfc_release(struct file *file)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_debug_enter();
	mutex_lock(&dev->mfc_mutex);
	s5p_mfc_clock_on();
	vb2_queue_release(&ctx->vq_src);
	vb2_queue_release(&ctx->vq_dst);
	/* Mark context as idle */
	clear_work_bit_irqsave(ctx);
	/* If instance was initialised and not yet freed,
	 * return instance and free resources */
	if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) {
		mfc_debug(2, "Has to free instance\n");
		s5p_mfc_close_mfc_inst(dev, ctx);
	}
	/* hardware locking scheme */
	if (dev->curr_ctx == ctx->num)
		clear_bit(0, &dev->hw_lock);
	dev->num_inst--;
	if (dev->num_inst == 0) {
		mfc_debug(2, "Last instance\n");
		s5p_mfc_deinit_hw(dev);
		del_timer_sync(&dev->watchdog_timer);
		if (s5p_mfc_power_off() < 0)
			mfc_err("Power off failed\n");
	}
	mfc_debug(2, "Shutting down clock\n");
	s5p_mfc_clock_off();
	dev->ctx[ctx->num] = NULL;
	s5p_mfc_dec_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	mfc_debug_leave();
	mutex_unlock(&dev->mfc_mutex);
	return 0;
}
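
/*
 * Added note on the poll semantics implemented below: POLLPRI is signalled
 * for pending V4L2 events (e.g. the source-change event queued in
 * s5p_mfc_handle_frame()), POLLOUT | POLLWRNORM when a bitstream (OUTPUT)
 * buffer has been consumed, and POLLIN | POLLRDNORM when a processed
 * (CAPTURE) buffer is ready to be dequeued.
 */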

/* Poll */
static unsigned int s5p_mfc_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	mutex_lock(&dev->mfc_mutex);
	src_q = &ctx->vq_src;
	dst_q = &ctx->vq_dst;
	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc = POLLERR;
		goto end;
	}
	mutex_unlock(&dev->mfc_mutex);
	poll_wait(file, &ctx->fh.wait, wait);
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);
	mutex_lock(&dev->mfc_mutex);
	if (v4l2_event_pending(&ctx->fh))
		rc |= POLLPRI;
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
								done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
				|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
								done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
				|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
	mutex_unlock(&dev->mfc_mutex);
	return rc;
}

/* Mmap */
static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int ret;

	if (mutex_lock_interruptible(&dev->mfc_mutex))
		return -ERESTARTSYS;
	if (offset < DST_QUEUE_OFF_BASE) {
		mfc_debug(2, "mmaping source\n");
		ret = vb2_mmap(&ctx->vq_src, vma);
	} else {		/* capture */
		mfc_debug(2, "mmaping destination\n");
		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
		ret = vb2_mmap(&ctx->vq_dst, vma);
	}
	mutex_unlock(&dev->mfc_mutex);
	return ret;
}

/* v4l2 ops */
static const struct v4l2_file_operations s5p_mfc_fops = {
	.owner = THIS_MODULE,
	.open = s5p_mfc_open,
	.release = s5p_mfc_release,
	.poll = s5p_mfc_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = s5p_mfc_mmap,
};

static int match_child(struct device *dev, void *data)
{
	if (!dev_name(dev))
		return 0;
	return !strcmp(dev_name(dev), (char *)data);
}

static void *mfc_get_drv_data(struct platform_device *pdev);
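
/*
 * Added note: on device-tree platforms the MFC needs two dedicated memory
 * banks (traditionally the "left" and "right" ports of the IP). The helper
 * below creates two dummy child devices and declares coherent DMA memory on
 * them from the "samsung,mfc-l" / "samsung,mfc-r" properties, which are
 * assumed here to be <base size> pairs as read into mem_info[].
 */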
static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev)
{
	unsigned int mem_info[2] = { };

	dev->mem_dev_l = devm_kzalloc(&dev->plat_dev->dev,
			sizeof(struct device), GFP_KERNEL);
	if (!dev->mem_dev_l) {
		mfc_err("Not enough memory\n");
		return -ENOMEM;
	}
	device_initialize(dev->mem_dev_l);
	of_property_read_u32_array(dev->plat_dev->dev.of_node,
			"samsung,mfc-l", mem_info, 2);
	if (dma_declare_coherent_memory(dev->mem_dev_l, mem_info[0],
				mem_info[0], mem_info[1],
				DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
		mfc_err("Failed to declare coherent memory for\n"
		"MFC device\n");
		return -ENOMEM;
	}

	dev->mem_dev_r = devm_kzalloc(&dev->plat_dev->dev,
			sizeof(struct device), GFP_KERNEL);
	if (!dev->mem_dev_r) {
		mfc_err("Not enough memory\n");
		return -ENOMEM;
	}
	device_initialize(dev->mem_dev_r);
	of_property_read_u32_array(dev->plat_dev->dev.of_node,
			"samsung,mfc-r", mem_info, 2);
	if (dma_declare_coherent_memory(dev->mem_dev_r, mem_info[0],
				mem_info[0], mem_info[1],
				DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE) == 0) {
		pr_err("Failed to declare coherent memory for\n"
		"MFC device\n");
		return -ENOMEM;
	}
	return 0;
}

/* MFC probe function */
static int s5p_mfc_probe(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev;
	struct video_device *vfd;
	struct resource *res;
	int ret;

	pr_debug("%s++\n", __func__);
	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "Not enough memory for MFC device\n");
		return -ENOMEM;
	}

	spin_lock_init(&dev->irqlock);
	spin_lock_init(&dev->condlock);
	dev->plat_dev = pdev;
	if (!dev->plat_dev) {
		dev_err(&pdev->dev, "No platform data specified\n");
		return -ENODEV;
	}

	dev->variant = mfc_get_drv_data(pdev);

	ret = s5p_mfc_init_pm(dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get mfc clock source\n");
		return ret;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		ret = -ENOENT;
		goto err_res;
	}
	dev->irq = res->start;
	ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
					0, pdev->name, dev);
	if (ret) {
		dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
		goto err_res;
	}

	if (pdev->dev.of_node) {
		ret = s5p_mfc_alloc_memdevs(dev);
		if (ret < 0)
			goto err_res;
	} else {
		dev->mem_dev_l = device_find_child(&dev->plat_dev->dev,
				"s5p-mfc-l", match_child);
		if (!dev->mem_dev_l) {
			mfc_err("Mem child (L) device get failed\n");
			ret = -ENODEV;
			goto err_res;
		}
		dev->mem_dev_r = device_find_child(&dev->plat_dev->dev,
				"s5p-mfc-r", match_child);
		if (!dev->mem_dev_r) {
			mfc_err("Mem child (R) device get failed\n");
			ret = -ENODEV;
			goto err_res;
		}
	}

	dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
	if (IS_ERR(dev->alloc_ctx[0])) {
		ret = PTR_ERR(dev->alloc_ctx[0]);
		goto err_res;
	}
	dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
	if (IS_ERR(dev->alloc_ctx[1])) {
		ret = PTR_ERR(dev->alloc_ctx[1]);
		goto err_mem_init_ctx_1;
	}

	mutex_init(&dev->mfc_mutex);

	ret = s5p_mfc_alloc_firmware(dev);
	if (ret)
		goto err_alloc_fw;

	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		goto err_v4l2_dev_reg;
	init_waitqueue_head(&dev->queue);

	/* decoder */
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto err_dec_alloc;
	}
	vfd->fops	= &s5p_mfc_fops;
	vfd->ioctl_ops	= get_dec_v4l2_ioctl_ops();
	vfd->release	= video_device_release;
	vfd->lock	= &dev->mfc_mutex;
	vfd->v4l2_dev	= &dev->v4l2_dev;
	vfd->vfl_dir	= VFL_DIR_M2M;
	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
	dev->vfd_dec	= vfd;
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		video_device_release(vfd);
		goto err_dec_reg;
	}
	v4l2_info(&dev->v4l2_dev,
		  "decoder registered as /dev/video%d\n", vfd->num);
	video_set_drvdata(vfd, dev);

	/* encoder */
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto err_enc_alloc;
	}
	vfd->fops	= &s5p_mfc_fops;
	vfd->ioctl_ops	= get_enc_v4l2_ioctl_ops();
	vfd->release	= video_device_release;
	vfd->lock	= &dev->mfc_mutex;
	vfd->v4l2_dev	= &dev->v4l2_dev;
	vfd->vfl_dir	= VFL_DIR_M2M;
	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
	dev->vfd_enc	= vfd;
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		video_device_release(vfd);
		goto err_enc_reg;
	}
	v4l2_info(&dev->v4l2_dev,
		  "encoder registered as /dev/video%d\n", vfd->num);
	video_set_drvdata(vfd, dev);
	platform_set_drvdata(pdev, dev);

	dev->hw_lock = 0;
	dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
	INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
	atomic_set(&dev->watchdog_cnt, 0);
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = s5p_mfc_watchdog;

	/* Initialize HW ops and commands based on MFC version */
	s5p_mfc_init_hw_ops(dev);
	s5p_mfc_init_hw_cmds(dev);
	s5p_mfc_init_regs(dev);

	pr_debug("%s--\n", __func__);
	return 0;

/* Deinit MFC if probe had failed */
err_enc_reg:
	video_device_release(dev->vfd_enc);
err_enc_alloc:
	video_unregister_device(dev->vfd_dec);
err_dec_reg:
	video_device_release(dev->vfd_dec);
err_dec_alloc:
	v4l2_device_unregister(&dev->v4l2_dev);
err_v4l2_dev_reg:
	s5p_mfc_release_firmware(dev);
err_alloc_fw:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
err_mem_init_ctx_1:
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
err_res:
	s5p_mfc_final_pm(dev);

	pr_debug("%s-- with error\n", __func__);
	return ret;

}
|
|
|
|
|
|
|
|
/* Remove the driver */
static int s5p_mfc_remove(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);

	del_timer_sync(&dev->watchdog_timer);
	flush_workqueue(dev->watchdog_workqueue);
	destroy_workqueue(dev->watchdog_workqueue);

	video_unregister_device(dev->vfd_enc);
	video_unregister_device(dev->vfd_dec);
	v4l2_device_unregister(&dev->v4l2_dev);
	s5p_mfc_release_firmware(dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
	if (pdev->dev.of_node) {
		put_device(dev->mem_dev_l);
		put_device(dev->mem_dev_r);
	}

	s5p_mfc_final_pm(dev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP

static int s5p_mfc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
	int ret;

	if (m_dev->num_inst == 0)
		return 0;

	if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
		mfc_err("Error: going to suspend for a second time\n");
		return -EIO;
	}

	/* Check if we're processing and wait if necessary. */
	while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
		/* Try and lock the HW */
		/* Wait on the interrupt waitqueue */
		ret = wait_event_interruptible_timeout(m_dev->queue,
			m_dev->int_cond, msecs_to_jiffies(MFC_INT_TIMEOUT));
		if (ret == 0) {
			mfc_err("Waiting for hardware to finish timed out\n");
			return -EIO;
		}
	}

	return s5p_mfc_sleep(m_dev);
}

static int s5p_mfc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

	if (m_dev->num_inst == 0)
		return 0;
	return s5p_mfc_wakeup(m_dev);
}
#endif

#ifdef CONFIG_PM_RUNTIME
static int s5p_mfc_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

	atomic_set(&m_dev->pm.power, 0);
	return 0;
}

static int s5p_mfc_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);

	if (!m_dev->alloc_ctx)
		return 0;
	atomic_set(&m_dev->pm.power, 1);
	return 0;
}
#endif

/* Power management */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
	SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
			   NULL)
};
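
/*
 * Note: SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() only expand to
 * the corresponding dev_pm_ops callback initialisers when system sleep and
 * runtime PM support are enabled in the kernel configuration; otherwise
 * they expand to nothing, so this table stays valid on !PM builds.
 */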

static struct s5p_mfc_buf_size_v5 mfc_buf_size_v5 = {
	.h264_ctx = MFC_H264_CTX_BUF_SIZE,
	.non_h264_ctx = MFC_CTX_BUF_SIZE,
	.dsc = DESC_BUF_SIZE,
	.shm = SHARED_BUF_SIZE,
};

static struct s5p_mfc_buf_size buf_size_v5 = {
	.fw = MAX_FW_SIZE,
	.cpb = MAX_CPB_SIZE,
	.priv = &mfc_buf_size_v5,
};

static struct s5p_mfc_buf_align mfc_buf_align_v5 = {
	.base = MFC_BASE_ALIGN_ORDER,
};

static struct s5p_mfc_variant mfc_drvdata_v5 = {
	.version = MFC_VERSION,
	.version_bit = MFC_V5_BIT,
	.port_num = MFC_NUM_PORTS,
	.buf_size = &buf_size_v5,
	.buf_align = &mfc_buf_align_v5,
	.fw_name[0] = "s5p-mfc.fw",
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v6 = {
	.dev_ctx = MFC_CTX_BUF_SIZE_V6,
	.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V6,
	.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V6,
	.h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V6,
	.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V6,
};

static struct s5p_mfc_buf_size buf_size_v6 = {
	.fw = MAX_FW_SIZE_V6,
	.cpb = MAX_CPB_SIZE_V6,
	.priv = &mfc_buf_size_v6,
};

static struct s5p_mfc_buf_align mfc_buf_align_v6 = {
	.base = 0,
};

static struct s5p_mfc_variant mfc_drvdata_v6 = {
	.version = MFC_VERSION_V6,
	.version_bit = MFC_V6_BIT,
	.port_num = MFC_NUM_PORTS_V6,
	.buf_size = &buf_size_v6,
	.buf_align = &mfc_buf_align_v6,
	.fw_name[0] = "s5p-mfc-v6.fw",
	/*
	 * v6-v2 firmware contains bug fixes and interface change
	 * for init buffer command
	 */
	.fw_name[1] = "s5p-mfc-v6-v2.fw",
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
	.dev_ctx = MFC_CTX_BUF_SIZE_V7,
	.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V7,
	.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V7,
	.h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V7,
	.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V7,
};

static struct s5p_mfc_buf_size buf_size_v7 = {
	.fw = MAX_FW_SIZE_V7,
	.cpb = MAX_CPB_SIZE_V7,
	.priv = &mfc_buf_size_v7,
};

static struct s5p_mfc_buf_align mfc_buf_align_v7 = {
	.base = 0,
};

static struct s5p_mfc_variant mfc_drvdata_v7 = {
	.version = MFC_VERSION_V7,
	.version_bit = MFC_V7_BIT,
	.port_num = MFC_NUM_PORTS_V7,
	.buf_size = &buf_size_v7,
	.buf_align = &mfc_buf_align_v7,
	.fw_name[0] = "s5p-mfc-v7.fw",
};

static struct s5p_mfc_buf_size_v6 mfc_buf_size_v8 = {
	.dev_ctx = MFC_CTX_BUF_SIZE_V8,
	.h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V8,
	.other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V8,
	.h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V8,
	.other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V8,
};

static struct s5p_mfc_buf_size buf_size_v8 = {
	.fw = MAX_FW_SIZE_V8,
	.cpb = MAX_CPB_SIZE_V8,
	.priv = &mfc_buf_size_v8,
};

static struct s5p_mfc_buf_align mfc_buf_align_v8 = {
	.base = 0,
};

static struct s5p_mfc_variant mfc_drvdata_v8 = {
	.version = MFC_VERSION_V8,
	.version_bit = MFC_V8_BIT,
	.port_num = MFC_NUM_PORTS_V8,
	.buf_size = &buf_size_v8,
	.buf_align = &mfc_buf_align_v8,
	.fw_name[0] = "s5p-mfc-v8.fw",
};

static struct platform_device_id mfc_driver_ids[] = {
	{
		.name = "s5p-mfc",
		.driver_data = (unsigned long)&mfc_drvdata_v5,
	}, {
		.name = "s5p-mfc-v5",
		.driver_data = (unsigned long)&mfc_drvdata_v5,
	}, {
		.name = "s5p-mfc-v6",
		.driver_data = (unsigned long)&mfc_drvdata_v6,
	}, {
		.name = "s5p-mfc-v7",
		.driver_data = (unsigned long)&mfc_drvdata_v7,
	}, {
		.name = "s5p-mfc-v8",
		.driver_data = (unsigned long)&mfc_drvdata_v8,
	},
	{},
};
MODULE_DEVICE_TABLE(platform, mfc_driver_ids);
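
/*
 * Illustrative (hypothetical) non-DT instantiation: a board file could
 * register a platform device whose name matches one of the entries above,
 * e.g. "s5p-mfc-v6", so that mfc_drvdata_v6 is selected through this id
 * table. The resource values below are placeholders, not taken from any
 * real board:
 *
 *	static struct resource mfc_res[] = {
 *		DEFINE_RES_MEM(0x13400000, SZ_64K),
 *		DEFINE_RES_IRQ(94),
 *	};
 *	platform_device_register_simple("s5p-mfc-v6", -1,
 *					mfc_res, ARRAY_SIZE(mfc_res));
 */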

static const struct of_device_id exynos_mfc_match[] = {
	{
		.compatible = "samsung,mfc-v5",
		.data = &mfc_drvdata_v5,
	}, {
		.compatible = "samsung,mfc-v6",
		.data = &mfc_drvdata_v6,
	}, {
		.compatible = "samsung,mfc-v7",
		.data = &mfc_drvdata_v7,
	}, {
		.compatible = "samsung,mfc-v8",
		.data = &mfc_drvdata_v8,
	},
	{},
};
MODULE_DEVICE_TABLE(of, exynos_mfc_match);
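
/*
 * Illustrative device-tree node that would bind against the
 * "samsung,mfc-v6" entry above; the unit address, register size and
 * interrupt number are placeholders only:
 *
 *	mfc: codec@13400000 {
 *		compatible = "samsung,mfc-v6";
 *		reg = <0x13400000 0x10000>;
 *		interrupts = <0 96 0>;
 *	};
 */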

static void *mfc_get_drv_data(struct platform_device *pdev)
{
	struct s5p_mfc_variant *driver_data = NULL;

	if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(exynos_mfc_match,
				pdev->dev.of_node);
		if (match)
			driver_data = (struct s5p_mfc_variant *)match->data;
	} else {
		driver_data = (struct s5p_mfc_variant *)
				platform_get_device_id(pdev)->driver_data;
	}
	return driver_data;
}
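
/*
 * mfc_get_drv_data() is intended to be called early in probe; a minimal
 * usage sketch, assuming the selected variant is kept in the device
 * context (the field name here is an assumption, not dictated by this
 * file):
 *
 *	dev->variant = mfc_get_drv_data(pdev);
 *	if (!dev->variant)
 *		return -ENOENT;
 */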

static struct platform_driver s5p_mfc_driver = {
	.probe = s5p_mfc_probe,
	.remove = s5p_mfc_remove,
	.id_table = mfc_driver_ids,
	.driver = {
		.name = S5P_MFC_NAME,
		.owner = THIS_MODULE,
		.pm = &s5p_mfc_pm_ops,
		.of_match_table = exynos_mfc_match,
	},
};

module_platform_driver(s5p_mfc_driver);
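
/*
 * module_platform_driver() generates the module_init()/module_exit()
 * boilerplate that registers s5p_mfc_driver with the platform bus when the
 * module is loaded and unregisters it on unload.
 */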

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");