From 21fbd085e62ff9a11318f5a9aba55e56e8503173 Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 26 Apr 2018 09:48:55 +0200 Subject: [PATCH 01/17] drm/vmwgfx: Clean up fbdev modeset locking At least since the atomic port, the vmwgfx fbdev code is taking a number of unnecessary modeset locks. In particular the kms_set_config() function will grab its own locks, leading to locking retries. So avoid drm_modeset_lock_all() and instead provide a local acquire context for kms_set_config(). Also have the vmw_kms_fbdev_init data itself grab the lock that it needs. This also fixed a long standing problem that vmw_fb_close() didn't provide an acquire context for kms_set_config(), causing potential warnings and hangs during driver unload. This problem was uncovered by the recent commit "drm/vmwgfx: Improve on hibernation" Testing done: Repeated driver load and unload on Ubuntu 16.04.2 Fixes: c3b9b1657344 ("drm/vmwgfx: Improve on hibernation") Signed-off-by: Thomas Hellstrom Reviewed-by: Deepak Rawat Reviewed-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 31 ++++++++++------------------- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 13 +++++++++--- 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 2582ffd36bb5..ba0cdb743c3e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -441,11 +441,11 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set) struct drm_crtc *crtc = set->crtc; struct drm_framebuffer *fb; struct drm_crtc *tmp; - struct drm_modeset_acquire_ctx *ctx; struct drm_device *dev = set->crtc->dev; + struct drm_modeset_acquire_ctx ctx; int ret; - ctx = dev->mode_config.acquire_ctx; + drm_modeset_acquire_init(&ctx, 0); restart: /* @@ -458,7 +458,7 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set) fb = set->fb; - ret = crtc->funcs->set_config(set, ctx); + ret = crtc->funcs->set_config(set, &ctx); if (ret == 0) { crtc->primary->crtc = crtc; crtc->primary->fb = fb; @@ -473,20 +473,13 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set) } if (ret == -EDEADLK) { - dev->mode_config.acquire_ctx = NULL; - -retry_locking: - drm_modeset_backoff(ctx); - - ret = drm_modeset_lock_all_ctx(dev, ctx); - if (ret) - goto retry_locking; - - dev->mode_config.acquire_ctx = ctx; - + drm_modeset_backoff(&ctx); goto restart; } + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + return ret; } @@ -624,7 +617,6 @@ static int vmw_fb_set_par(struct fb_info *info) } mutex_lock(&par->bo_mutex); - drm_modeset_lock_all(vmw_priv->dev); ret = vmw_fb_kms_framebuffer(info); if (ret) goto out_unlock; @@ -657,7 +649,6 @@ static int vmw_fb_set_par(struct fb_info *info) drm_mode_destroy(vmw_priv->dev, old_mode); par->set_mode = mode; - drm_modeset_unlock_all(vmw_priv->dev); mutex_unlock(&par->bo_mutex); return ret; @@ -713,18 +704,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv) par->max_width = fb_width; par->max_height = fb_height; - drm_modeset_lock_all(vmw_priv->dev); ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width, par->max_height, &par->con, &par->crtc, &init_mode); - if (ret) { - drm_modeset_unlock_all(vmw_priv->dev); + if (ret) goto err_kms; - } info->var.xres = init_mode->hdisplay; info->var.yres = init_mode->vdisplay; - drm_modeset_unlock_all(vmw_priv->dev); /* * Create buffers and alloc memory @@ -832,7 +819,9 @@ int vmw_fb_close(struct vmw_private *vmw_priv) cancel_delayed_work_sync(&par->local_work); 
unregister_framebuffer(info); + mutex_lock(&par->bo_mutex); (void) vmw_fb_kms_detach(par, true, true); + mutex_unlock(&par->bo_mutex); vfree(par->vmalloc); framebuffer_release(info); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index f11601b6fd74..aacc9307ab0b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2680,7 +2680,9 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, struct vmw_display_unit *du; struct drm_display_mode *mode; int i = 0; + int ret = 0; + mutex_lock(&dev_priv->dev->mode_config.mutex); list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, head) { if (i == unit) @@ -2691,7 +2693,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, if (i != unit) { DRM_ERROR("Could not find initial display unit.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock; } if (list_empty(&con->modes)) @@ -2699,7 +2702,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, if (list_empty(&con->modes)) { DRM_ERROR("Could not find initial display mode.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_unlock; } du = vmw_connector_to_du(con); @@ -2720,7 +2724,10 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, head); } - return 0; + out_unlock: + mutex_unlock(&dev_priv->dev->mode_config.mutex); + + return ret; } /** From 13f149d47392782baafd96d54d4e65f3b5ca342f Mon Sep 17 00:00:00 2001 From: Thomas Hellstrom Date: Thu, 26 Apr 2018 09:59:30 +0200 Subject: [PATCH 02/17] drm/vmwgfx: Fix a buffer object leak A buffer object leak was introduced when fixing a premature buffer object release. Fix this. Cc: Fixes: 73a88250b709 ("Fix a destoy-while-held mutex problem.") Signed-off-by: Thomas Hellstrom Reviewed-by: Deepak Rawat Reviewed-by: Sinclair Yeh --- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index aacc9307ab0b..96fd7a03d2f8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2595,6 +2595,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, out_fence, NULL); + vmw_dmabuf_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } From b607990c76ceda0a7a7ceacabab174cdc8b9beee Mon Sep 17 00:00:00 2001 From: Ian W MORRISON Date: Wed, 11 Apr 2018 14:42:13 +1000 Subject: [PATCH 03/17] drm/i915/glk: Add MODULE_FIRMWARE for Geminilake As the Geminilake firmware is now merged to linux-firmware.git use MODUE_FIRMWARE to load the firmware. This removes the error message in the dmesg log: i915 0000:00:02.0: Direct firmware load for i915/glk_dmc_ver1_04.bin failed with error -2 i915 0000:00:02.0: Failed to load DMC firmware i915/glk_dmc_ver1_04.bin. Disabling runtime power management. 
i915 0000:00:02.0: DMC firmware homepage: https://01.org/linuxgraphics/downloads/firmware and now shows that the firmware has correctly loaded: [drm] Finished loading DMC firmware i915/glk_dmc_ver1_04.bin (v1.4) Signed-off-by: Ian W MORRISON Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180411044213.383-1-ianwmorrison@gmail.com (cherry picked from commit f6d3e06f074721ad3a231df745d85b60428c1f03) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/intel_csr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 41e6c75a7f3c..f9550ea46c26 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -35,6 +35,7 @@ */ #define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin" +MODULE_FIRMWARE(I915_CSR_GLK); #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4) #define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin" From f7aef1c207092770d06d0df21dceafdca2b49c39 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 30 Apr 2018 15:32:32 +0200 Subject: [PATCH 04/17] drm/vc4: Make sure vc4_bo_{inc,dec}_usecnt() calls are balanced Commit b9f19259b84d ("drm/vc4: Add the DRM_IOCTL_VC4_GEM_MADVISE ioctl") introduced a mechanism to mark some BOs as purgeable to allow the driver to drop them under memory pressure. In order to implement this feature we had to add a mechanism to mark BOs as currently used by a piece of hardware which materialized through the ->usecnt counter. Plane code is supposed to increment usecnt when it attaches a BO to a plane and decrement it when it's done with this BO, which was done in the ->prepare_fb() and ->cleanup_fb() hooks. The problem is, async page flip logic does not go through the regular atomic update path, and ->prepare_fb() and ->cleanup_fb() are not called in this case. Fix that by manually calling vc4_bo_{inc,dec}_usecnt() in the async-page-flip path. Note that all this should go away as soon as we get generic async page flip support in the core, in the meantime, this fix should do the trick. Fixes: b9f19259b84d ("drm/vc4: Add the DRM_IOCTL_VC4_GEM_MADVISE ioctl") Reported-by: Peter Robinson Cc: Signed-off-by: Boris Brezillon Signed-off-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20180430133232.32457-1-boris.brezillon@bootlin.com Link: https://patchwork.freedesktop.org/patch/msgid/20180430133232.32457-1-boris.brezillon@bootlin.com --- drivers/gpu/drm/vc4/vc4_crtc.c | 46 +++++++++++++++++++++++++++++++++- 1 file changed, 45 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index bf4667481935..c61dff594195 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -760,6 +760,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data) struct vc4_async_flip_state { struct drm_crtc *crtc; struct drm_framebuffer *fb; + struct drm_framebuffer *old_fb; struct drm_pending_vblank_event *event; struct vc4_seqno_cb cb; @@ -789,6 +790,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) drm_crtc_vblank_put(crtc); drm_framebuffer_put(flip_state->fb); + + /* Decrement the BO usecnt in order to keep the inc/dec calls balanced + * when the planes are updated through the async update path. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made cleanup_fb() + * logic. 
+ */ + if (flip_state->old_fb) { + struct drm_gem_cma_object *cma_bo; + struct vc4_bo *bo; + + cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); + bo = to_vc4_bo(&cma_bo->base); + vc4_bo_dec_usecnt(bo); + drm_framebuffer_put(flip_state->old_fb); + } + kfree(flip_state); up(&vc4->async_modeset); @@ -813,9 +831,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + /* Increment the BO usecnt here, so that we never end up with an + * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the + * plane is later updated through the non-async path. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made prepare_fb() + * logic. + */ + ret = vc4_bo_inc_usecnt(bo); + if (ret) + return ret; + flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); - if (!flip_state) + if (!flip_state) { + vc4_bo_dec_usecnt(bo); return -ENOMEM; + } drm_framebuffer_get(fb); flip_state->fb = fb; @@ -826,10 +857,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, ret = down_interruptible(&vc4->async_modeset); if (ret) { drm_framebuffer_put(fb); + vc4_bo_dec_usecnt(bo); kfree(flip_state); return ret; } + /* Save the current FB before it's replaced by the new one in + * drm_atomic_set_fb_for_plane(). We'll need the old FB in + * vc4_async_page_flip_complete() to decrement the BO usecnt and keep + * it consistent. + * FIXME: we should move to generic async-page-flip when it's + * available, so that we can get rid of this hand-made cleanup_fb() + * logic. + */ + flip_state->old_fb = plane->state->fb; + if (flip_state->old_fb) + drm_framebuffer_get(flip_state->old_fb); + WARN_ON(drm_crtc_vblank_get(crtc) != 0); /* Immediately update the plane's legacy fb pointer, so that later From 49ceda9de2da4d1827941d06701f3017c27c1855 Mon Sep 17 00:00:00 2001 From: Sean Paul Date: Fri, 20 Apr 2018 14:59:59 -0400 Subject: [PATCH 05/17] drm/bridge: vga-dac: Fix edid memory leak edid should be freed once it's finished being used. Fixes: 56fe8b6f4991 ("drm/bridge: Add RGB to VGA bridge support") Cc: Rob Herring Cc: Sean Paul Cc: Maxime Ripard Cc: Archit Taneja Cc: Andrzej Hajda Cc: Laurent Pinchart Cc: # v4.9+ Reviewed-by: Maxime Ripard Reviewed-by: Laurent Pinchart Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/20180420190007.1572-1-seanpaul@chromium.org --- drivers/gpu/drm/bridge/dumb-vga-dac.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index 498d5948d1a8..9837c8d69e69 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -56,7 +56,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector) } drm_mode_connector_update_edid_property(connector, edid); - return drm_add_edid_modes(connector, edid); + ret = drm_add_edid_modes(connector, edid); + kfree(edid); + return ret; fallback: /* From 2eced8e917b060587fc8ed46df41c364957a5050 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Fri, 2 Feb 2018 16:11:22 +0100 Subject: [PATCH 06/17] drm/exynos/mixer: fix synchronization check in interlaced mode In case of interlace mode video processor registers and mixer config register must be check to ensure internal state is in sync with shadow registers. This patch fixes page-faults in interlaced mode. 
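
As an illustration of the check being added, here is a condensed sketch only (not part of the patch), using the register accessors and bit names visible in the hunks below; the real test lives inline in mixer_irq_handler():

static bool mixer_shadow_regs_in_sync(struct mixer_context *ctx)
{
        /* a video-processor shadow update is still pending */
        if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
            vp_reg_read(ctx, VP_SHADOW_UPDATE))
                return false;

        /* mixer configuration not yet latched from its shadow copy */
        if (mixer_reg_read(ctx, MXR_CFG) != mixer_reg_read(ctx, MXR_CFG_S))
                return false;

        /* graphics layer base address not yet latched */
        return mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)) ==
               mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
}

Only when all three comparisons agree has the hardware consumed the previously programmed frame; otherwise the interrupt handler bails out and waits for the next interrupt before signalling completion.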
Signed-off-by: Andrzej Hajda Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_mixer.c | 10 ++++++++++ drivers/gpu/drm/exynos/regs-mixer.h | 1 + 2 files changed, 11 insertions(+) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 257299ec95c4..a8d978d6e4e0 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -482,6 +482,7 @@ static void vp_video_buffer(struct mixer_context *ctx, spin_lock_irqsave(&ctx->reg_slock, flags); + vp_reg_write(ctx, VP_SHADOW_UPDATE, 1); /* interlace or progressive scan mode */ val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); @@ -699,6 +700,15 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg) /* interlace scan need to check shadow register */ if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { + if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && + vp_reg_read(ctx, VP_SHADOW_UPDATE)) + goto out; + + base = mixer_reg_read(ctx, MXR_CFG); + shadow = mixer_reg_read(ctx, MXR_CFG_S); + if (base != shadow) + goto out; + base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0)); shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0)); if (base != shadow) diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h index c311f571bdf9..189cfa2470a8 100644 --- a/drivers/gpu/drm/exynos/regs-mixer.h +++ b/drivers/gpu/drm/exynos/regs-mixer.h @@ -47,6 +47,7 @@ #define MXR_MO 0x0304 #define MXR_RESOLUTION 0x0310 +#define MXR_CFG_S 0x2004 #define MXR_GRAPHIC0_BASE_S 0x2024 #define MXR_GRAPHIC1_BASE_S 0x2044 From 0ccc1c8f0282e237a0bd6dca7cdac4ed5e318ee7 Mon Sep 17 00:00:00 2001 From: Tobias Jakobi Date: Fri, 2 Feb 2018 16:11:23 +0100 Subject: [PATCH 07/17] drm/exynos: mixer: avoid Oops in vp_video_buffer() If an interlaced video mode is selected, a IOMMU pagefault is triggered by vp_video_buffer(). 
Fix the most apparent bugs: - pitch value for chroma plane - divide by two of height and vpos of source and destination Signed-off-by: Tobias Jakobi [ a.hajda: Halved also destination height and vpos, updated commit message ] Signed-off-by: Andrzej Hajda Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_mixer.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index a8d978d6e4e0..272c79f5f5bf 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -473,7 +473,7 @@ static void vp_video_buffer(struct mixer_context *ctx, chroma_addr[1] = chroma_addr[0] + 0x40; } else { luma_addr[1] = luma_addr[0] + fb->pitches[0]; - chroma_addr[1] = chroma_addr[0] + fb->pitches[0]; + chroma_addr[1] = chroma_addr[0] + fb->pitches[1]; } } else { luma_addr[1] = 0; @@ -496,21 +496,23 @@ static void vp_video_buffer(struct mixer_context *ctx, vp_reg_write(ctx, VP_IMG_SIZE_Y, VP_IMG_HSIZE(fb->pitches[0]) | VP_IMG_VSIZE(fb->height)); /* chroma plane for NV12/NV21 is half the height of the luma plane */ - vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[0]) | + vp_reg_write(ctx, VP_IMG_SIZE_C, VP_IMG_HSIZE(fb->pitches[1]) | VP_IMG_VSIZE(fb->height / 2)); vp_reg_write(ctx, VP_SRC_WIDTH, state->src.w); - vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); vp_reg_write(ctx, VP_SRC_H_POSITION, VP_SRC_H_POSITION_VAL(state->src.x)); - vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); - vp_reg_write(ctx, VP_DST_WIDTH, state->crtc.w); vp_reg_write(ctx, VP_DST_H_POSITION, state->crtc.x); + if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { + vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h / 2); + vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y / 2); vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h / 2); vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y / 2); } else { + vp_reg_write(ctx, VP_SRC_HEIGHT, state->src.h); + vp_reg_write(ctx, VP_SRC_V_POSITION, state->src.y); vp_reg_write(ctx, VP_DST_HEIGHT, state->crtc.h); vp_reg_write(ctx, VP_DST_V_POSITION, state->crtc.y); } From 6f2db7dc901a1b89fbc50f7b38f0f7ee17205703 Mon Sep 17 00:00:00 2001 From: Peter Rosin Date: Wed, 2 May 2018 09:40:25 +0200 Subject: [PATCH 08/17] drm/exynos: hdmi: avoid duplicating drm_bridge_attach drm_bridge_attach takes care of these assignments, so there is no need to open-code them a second time. Signed-off-by: Peter Rosin Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_hdmi.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index abd84cbcf1c2..09c4bc0b1859 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -954,8 +954,6 @@ static int hdmi_create_connector(struct drm_encoder *encoder) drm_mode_connector_attach_encoder(connector, encoder); if (hdata->bridge) { - encoder->bridge = hdata->bridge; - hdata->bridge->encoder = encoder; ret = drm_bridge_attach(encoder, hdata->bridge, NULL); if (ret) DRM_ERROR("Failed to attach bridge\n"); From 8a8d9b2c38d9e050bec8d203ba2fb40c663c1b9c Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Sat, 14 Apr 2018 21:34:29 +0530 Subject: [PATCH 09/17] gpu: drm: exynos: Change return type to vm_fault_t Use new return type vm_fault_t for fault handler in struct vm_operations_struct. 
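
A minimal sketch of the resulting handler shape (illustrative only; some_gem_pfn() is a hypothetical helper standing in for the driver's page lookup): vmf_insert_mixed() already returns a vm_fault_t, so the errno-to-VM_FAULT_* translation that the old vm_insert_mixed() path forced on callers simply disappears.

#include <linux/mm.h>           /* vm_fault_t, vmf_insert_mixed() */
#include <linux/pfn_t.h>        /* __pfn_to_pfn_t(), PFN_DEV */

static vm_fault_t example_gem_fault(struct vm_fault *vmf)
{
        unsigned long pfn;

        /* some_gem_pfn() is hypothetical: resolve the faulting page */
        if (some_gem_pfn(vmf, &pfn))
                return VM_FAULT_SIGBUS;

        /* returns VM_FAULT_NOPAGE, VM_FAULT_OOM, ... directly */
        return vmf_insert_mixed(vmf->vma, vmf->address,
                                __pfn_to_pfn_t(pfn, PFN_DEV));
}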
Signed-off-by: Souptick Joarder Reviewed-by: Matthew Wilcox Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_gem.c | 21 ++++----------------- drivers/gpu/drm/exynos/exynos_drm_gem.h | 3 ++- 2 files changed, 6 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 11cc01b47bc0..6e1494fa71b4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -431,37 +431,24 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, return 0; } -int exynos_drm_gem_fault(struct vm_fault *vmf) +vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct drm_gem_object *obj = vma->vm_private_data; struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj); unsigned long pfn; pgoff_t page_offset; - int ret; page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT; if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) { DRM_ERROR("invalid page offset\n"); - ret = -EINVAL; - goto out; + return VM_FAULT_SIGBUS; } pfn = page_to_pfn(exynos_gem->pages[page_offset]); - ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV)); - -out: - switch (ret) { - case 0: - case -ERESTARTSYS: - case -EINTR: - return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - default: - return VM_FAULT_SIGBUS; - } + return vmf_insert_mixed(vma, vmf->address, + __pfn_to_pfn_t(pfn, PFN_DEV)); } static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h index 5a4c7de80f65..9057d7f1d6ed 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.h +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h @@ -13,6 +13,7 @@ #define _EXYNOS_DRM_GEM_H_ #include +#include #define to_exynos_gem(x) container_of(x, struct exynos_drm_gem, base) @@ -111,7 +112,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_mode_create_dumb *args); /* page fault handler and mmap fault address(virtual) to physical memory. */ -int exynos_drm_gem_fault(struct vm_fault *vmf); +vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf); /* set vm_flags and we can change the vm attribute to other one at here. */ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); From fa50b7b4ba50f015acd0a6ca505582851e316d2a Mon Sep 17 00:00:00 2001 From: Tomasz Figa Date: Sat, 21 Apr 2018 19:26:10 +0200 Subject: [PATCH 10/17] drm/exynos: fimd: Add support for S5PV210 FIMD variant MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds support for FIMD variant found on S5PV210 SoC. Except CLKSEL bit availability, it is identical to Exynos4210. 
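
For context, a hedged sketch (not part of the patch) of how such a variant entry is consumed once the of_device_id table matches; of_device_get_match_data() is the generic OF helper, and the exact call sites in exynos_drm_fimd.c may differ:

#include <linux/of_device.h>

static bool example_fimd_uses_lcd_clksel(struct device *dev)
{
        /* "samsung,s5pv210-fimd" resolves to &s5pv210_fimd_driver_data */
        const struct fimd_driver_data *data = of_device_get_match_data(dev);

        /* has_clksel gates the variant-specific LCD clock-source setup */
        return data && data->has_clksel;
}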
Tested-by: Paweł Chmiel Signed-off-by: Tomasz Figa Signed-off-by: Paweł Chmiel Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index d42ae2bc3e56..01b1570d0c3a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -121,6 +121,12 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = { .has_limited_fmt = 1, }; +static struct fimd_driver_data s5pv210_fimd_driver_data = { + .timing_base = 0x0, + .has_shadowcon = 1, + .has_clksel = 1, +}; + static struct fimd_driver_data exynos3_fimd_driver_data = { .timing_base = 0x20000, .lcdblk_offset = 0x210, @@ -193,6 +199,8 @@ struct fimd_context { static const struct of_device_id fimd_driver_dt_match[] = { { .compatible = "samsung,s3c6400-fimd", .data = &s3c64xx_fimd_driver_data }, + { .compatible = "samsung,s5pv210-fimd", + .data = &s5pv210_fimd_driver_data }, { .compatible = "samsung,exynos3250-fimd", .data = &exynos3_fimd_driver_data }, { .compatible = "samsung,exynos4210-fimd", From 5fae288d8ddd5b75d38d323cb4aa51ed2190ce17 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Chmiel?= Date: Sat, 21 Apr 2018 19:26:11 +0200 Subject: [PATCH 11/17] drm/exynos: Allow DRM_EXYNOS on s5pv210. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch brings back possibility to use drivers depending on DRM_EXYNOS, on Samsung S5PV210/S5PC110 series based systems. Fixes: dbbc925bb83a ("drm/exynos: depend on ARCH_EXYNOS for DRM_EXYNOS") Signed-off-by: Paweł Chmiel Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 735ce47688f9..1548a784ef71 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -1,6 +1,6 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" - depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM) + depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER select VIDEOMODE_HELPERS select SND_SOC_HDMI_CODEC if SND_SOC From 9913f74fe15705acd5163551ddf449568cf0048d Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 10 May 2018 08:46:36 +0900 Subject: [PATCH 12/17] drm/exynos: ipp: Add IPP v2 framework This patch adds Exynos IPP v2 subsystem and userspace API. New userspace API is focused ONLY on memory-to-memory image processing. The two remainging operation modes of obsolete IPP v1 API (framebuffer writeback and local-path output with image processing) can be implemented using standard DRM features: writeback connectors and additional DRM planes with scaling features. V2 IPP userspace API is based on stateless approach, which much better fits to memory-to-memory image processing model. It also provides support for all image formats, which are both already defined in DRM API and supported by the existing IPP hardware modules. 
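
The query ioctls introduced below share a stateless, count-then-fill calling convention. A hedged userspace sketch (error handling trimmed; DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES and struct drm_exynos_ioctl_ipp_get_res come from the uapi header added by this patch, and GET_CAPS and GET_LIMITS follow the same pattern):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/exynos_drm.h>     /* uapi header added by this patch */

static uint32_t *enumerate_ipps(int drm_fd, uint32_t *count)
{
        struct drm_exynos_ioctl_ipp_get_res res = { 0 };
        uint32_t *ids;

        /* pass 1: ipp_id_ptr is NULL, driver only reports the count */
        if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res))
                return NULL;

        ids = calloc(res.count_ipps, sizeof(*ids));
        if (!ids)
                return NULL;

        /* pass 2: same ioctl fills the caller-provided array */
        res.ipp_id_ptr = (uintptr_t)ids;
        if (ioctl(drm_fd, DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES, &res)) {
                free(ids);
                return NULL;
        }

        *count = res.count_ipps;
        return ids;
}

The kerneldoc in exynos_drm_ipp.c below spells this out: each query ioctl "is called twice, once to determine how much space is needed, and the 2nd time to fill it."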
The API consists of the following ioctls: - DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES: to enumerate all available image processing modules, - DRM_IOCTL_EXYNOS_IPP_GET_CAPS: to query capabilities and supported image formats of given IPP module, - DRM_IOCTL_EXYNOS_IPP_GET_LIMITS: to query hardware limitiations for selected image format of given IPP module, - DRM_IOCTL_EXYNOS_IPP_COMMIT: to perform operation described by the provided structures (source and destination buffers, operation rectangle, transformation, etc). The proposed userspace API is extensible. In the future more advanced image processing operations can be defined to support for example blending. Userspace API is fully functional also on DRM render nodes, so it is not limited to the root/privileged client. Internal driver API also has been completely rewritten. New IPP core performs all possible input validation, checks and object life-time control. The drivers can focus only on writing configuration to hardware registers. Stateless nature of DRM_IOCTL_EXYNOS_IPP_COMMIT ioctl simplifies the driver API. Minimal driver needs to provide a single callback for starting processing and an array with supported image formats. Signed-off-by: Marek Szyprowski Tested-by: Hoegeun Kwon Merge conflict so merged manually. Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/Kconfig | 3 + drivers/gpu/drm/exynos/Makefile | 1 + drivers/gpu/drm/exynos/exynos_drm_drv.c | 22 +- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 916 ++++++++++++++++++++++++ drivers/gpu/drm/exynos/exynos_drm_ipp.h | 175 +++++ include/uapi/drm/exynos_drm.h | 240 +++++++ 6 files changed, 1355 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/exynos/exynos_drm_ipp.c create mode 100644 drivers/gpu/drm/exynos/exynos_drm_ipp.h diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 1548a784ef71..9e914655c430 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -95,6 +95,9 @@ config DRM_EXYNOS_G2D help Choose this option if you want to use Exynos G2D for DRM. 
+config DRM_EXYNOS_IPP + bool + config DRM_EXYNOS_FIMC bool "FIMC" depends on BROKEN && MFD_SYSCON diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index a51c5459bb13..bdf4212dde7b 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile @@ -18,6 +18,7 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER) += exynos_mixer.o exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o +exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index a518e9c6d6cc..37c0db759674 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -27,15 +27,23 @@ #include "exynos_drm_fb.h" #include "exynos_drm_gem.h" #include "exynos_drm_plane.h" +#include "exynos_drm_ipp.h" #include "exynos_drm_vidi.h" #include "exynos_drm_g2d.h" #include "exynos_drm_iommu.h" #define DRIVER_NAME "exynos" #define DRIVER_DESC "Samsung SoC DRM" -#define DRIVER_DATE "20110530" +#define DRIVER_DATE "20180330" + +/* + * Interface history: + * + * 1.0 - Original version + * 1.1 - Upgrade IPP driver to version 2.0 + */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 +#define DRIVER_MINOR 1 int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) @@ -108,6 +116,16 @@ static const struct drm_ioctl_desc exynos_ioctls[] = { DRM_AUTH | DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(EXYNOS_G2D_EXEC, exynos_g2d_exec_ioctl, DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_RESOURCES, + exynos_drm_ipp_get_res_ioctl, + DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_CAPS, exynos_drm_ipp_get_caps_ioctl, + DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(EXYNOS_IPP_GET_LIMITS, + exynos_drm_ipp_get_limits_ioctl, + DRM_AUTH | DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(EXYNOS_IPP_COMMIT, exynos_drm_ipp_commit_ioctl, + DRM_AUTH | DRM_RENDER_ALLOW), }; static const struct file_operations exynos_drm_driver_fops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c new file mode 100644 index 000000000000..26374e58c557 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -0,0 +1,916 @@ +/* + * Copyright (C) 2017 Samsung Electronics Co.Ltd + * Authors: + * Marek Szyprowski + * + * Exynos DRM Image Post Processing (IPP) related functions + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ */ + + +#include +#include +#include + +#include "exynos_drm_drv.h" +#include "exynos_drm_gem.h" +#include "exynos_drm_ipp.h" + +static int num_ipp; +static LIST_HEAD(ipp_list); + +/** + * exynos_drm_ipp_register - Register a new picture processor hardware module + * @dev: DRM device + * @ipp: ipp module to init + * @funcs: callbacks for the new ipp object + * @caps: bitmask of ipp capabilities (%DRM_EXYNOS_IPP_CAP_*) + * @formats: array of supported formats + * @num_formats: size of the supported formats array + * @name: name (for debugging purposes) + * + * Initializes a ipp module. + * + * Returns: + * Zero on success, error code on failure. + */ +int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp, + const struct exynos_drm_ipp_funcs *funcs, unsigned int caps, + const struct exynos_drm_ipp_formats *formats, + unsigned int num_formats, const char *name) +{ + WARN_ON(!ipp); + WARN_ON(!funcs); + WARN_ON(!formats); + WARN_ON(!num_formats); + + spin_lock_init(&ipp->lock); + INIT_LIST_HEAD(&ipp->todo_list); + init_waitqueue_head(&ipp->done_wq); + ipp->dev = dev; + ipp->funcs = funcs; + ipp->capabilities = caps; + ipp->name = name; + ipp->formats = formats; + ipp->num_formats = num_formats; + + /* ipp_list modification is serialized by component framework */ + list_add_tail(&ipp->head, &ipp_list); + ipp->id = num_ipp++; + + DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id); + + return 0; +} + +/** + * exynos_drm_ipp_unregister - Unregister the picture processor module + * @dev: DRM device + * @ipp: ipp module + */ +void exynos_drm_ipp_unregister(struct drm_device *dev, + struct exynos_drm_ipp *ipp) +{ + WARN_ON(ipp->task); + WARN_ON(!list_empty(&ipp->todo_list)); + list_del(&ipp->head); +} + +/** + * exynos_drm_ipp_ioctl_get_res_ioctl - enumerate all ipp modules + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * Construct a list of ipp ids. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_exynos_ioctl_ipp_get_res *resp = data; + struct exynos_drm_ipp *ipp; + uint32_t __user *ipp_ptr = (uint32_t __user *) + (unsigned long)resp->ipp_id_ptr; + unsigned int count = num_ipp, copied = 0; + + /* + * This ioctl is called twice, once to determine how much space is + * needed, and the 2nd time to fill it. + */ + if (count && resp->count_ipps >= count) { + list_for_each_entry(ipp, &ipp_list, head) { + if (put_user(ipp->id, ipp_ptr + copied)) + return -EFAULT; + copied++; + } + } + resp->count_ipps = count; + + return 0; +} + +static inline struct exynos_drm_ipp *__ipp_get(uint32_t id) +{ + struct exynos_drm_ipp *ipp; + + list_for_each_entry(ipp, &ipp_list, head) + if (ipp->id == id) + return ipp; + return NULL; +} + +/** + * exynos_drm_ipp_ioctl_get_caps - get ipp module capabilities and formats + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * Construct a structure describing ipp module capabilities. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. 
+ */ +int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_exynos_ioctl_ipp_get_caps *resp = data; + void __user *ptr = (void __user *)(unsigned long)resp->formats_ptr; + struct exynos_drm_ipp *ipp; + int i; + + ipp = __ipp_get(resp->ipp_id); + if (!ipp) + return -ENOENT; + + resp->ipp_id = ipp->id; + resp->capabilities = ipp->capabilities; + + /* + * This ioctl is called twice, once to determine how much space is + * needed, and the 2nd time to fill it. + */ + if (resp->formats_count >= ipp->num_formats) { + for (i = 0; i < ipp->num_formats; i++) { + struct drm_exynos_ipp_format tmp = { + .fourcc = ipp->formats[i].fourcc, + .type = ipp->formats[i].type, + .modifier = ipp->formats[i].modifier, + }; + + if (copy_to_user(ptr, &tmp, sizeof(tmp))) + return -EFAULT; + ptr += sizeof(tmp); + } + } + resp->formats_count = ipp->num_formats; + + return 0; +} + +static inline const struct exynos_drm_ipp_formats *__ipp_format_get( + struct exynos_drm_ipp *ipp, uint32_t fourcc, + uint64_t mod, unsigned int type) +{ + int i; + + for (i = 0; i < ipp->num_formats; i++) { + if ((ipp->formats[i].type & type) && + ipp->formats[i].fourcc == fourcc && + ipp->formats[i].modifier == mod) + return &ipp->formats[i]; + } + return NULL; +} + +/** + * exynos_drm_ipp_get_limits_ioctl - get ipp module limits + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * Construct a structure describing ipp module limitations for provided + * picture format. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_exynos_ioctl_ipp_get_limits *resp = data; + void __user *ptr = (void __user *)(unsigned long)resp->limits_ptr; + const struct exynos_drm_ipp_formats *format; + struct exynos_drm_ipp *ipp; + + if (resp->type != DRM_EXYNOS_IPP_FORMAT_SOURCE && + resp->type != DRM_EXYNOS_IPP_FORMAT_DESTINATION) + return -EINVAL; + + ipp = __ipp_get(resp->ipp_id); + if (!ipp) + return -ENOENT; + + format = __ipp_format_get(ipp, resp->fourcc, resp->modifier, + resp->type); + if (!format) + return -EINVAL; + + /* + * This ioctl is called twice, once to determine how much space is + * needed, and the 2nd time to fill it. 
+ */ + if (format->num_limits && resp->limits_count >= format->num_limits) + if (copy_to_user((void __user *)ptr, format->limits, + sizeof(*format->limits) * format->num_limits)) + return -EFAULT; + resp->limits_count = format->num_limits; + + return 0; +} + +struct drm_pending_exynos_ipp_event { + struct drm_pending_event base; + struct drm_exynos_ipp_event event; +}; + +static inline struct exynos_drm_ipp_task * + exynos_drm_ipp_task_alloc(struct exynos_drm_ipp *ipp) +{ + struct exynos_drm_ipp_task *task; + + task = kzalloc(sizeof(*task), GFP_KERNEL); + if (!task) + return NULL; + + task->dev = ipp->dev; + task->ipp = ipp; + + /* some defaults */ + task->src.rect.w = task->dst.rect.w = UINT_MAX; + task->src.rect.h = task->dst.rect.h = UINT_MAX; + task->transform.rotation = DRM_MODE_ROTATE_0; + + DRM_DEBUG_DRIVER("Allocated task %pK\n", task); + + return task; +} + +static const struct exynos_drm_param_map { + unsigned int id; + unsigned int size; + unsigned int offset; +} exynos_drm_ipp_params_maps[] = { + { + DRM_EXYNOS_IPP_TASK_BUFFER | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE, + sizeof(struct drm_exynos_ipp_task_buffer), + offsetof(struct exynos_drm_ipp_task, src.buf), + }, { + DRM_EXYNOS_IPP_TASK_BUFFER | + DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION, + sizeof(struct drm_exynos_ipp_task_buffer), + offsetof(struct exynos_drm_ipp_task, dst.buf), + }, { + DRM_EXYNOS_IPP_TASK_RECTANGLE | DRM_EXYNOS_IPP_TASK_TYPE_SOURCE, + sizeof(struct drm_exynos_ipp_task_rect), + offsetof(struct exynos_drm_ipp_task, src.rect), + }, { + DRM_EXYNOS_IPP_TASK_RECTANGLE | + DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION, + sizeof(struct drm_exynos_ipp_task_rect), + offsetof(struct exynos_drm_ipp_task, dst.rect), + }, { + DRM_EXYNOS_IPP_TASK_TRANSFORM, + sizeof(struct drm_exynos_ipp_task_transform), + offsetof(struct exynos_drm_ipp_task, transform), + }, { + DRM_EXYNOS_IPP_TASK_ALPHA, + sizeof(struct drm_exynos_ipp_task_alpha), + offsetof(struct exynos_drm_ipp_task, alpha), + }, +}; + +static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task, + struct drm_exynos_ioctl_ipp_commit *arg) +{ + const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps; + void __user *params = (void __user *)(unsigned long)arg->params_ptr; + unsigned int size = arg->params_size; + uint32_t id; + int i; + + while (size) { + if (get_user(id, (uint32_t __user *)params)) + return -EFAULT; + + for (i = 0; i < ARRAY_SIZE(exynos_drm_ipp_params_maps); i++) + if (map[i].id == id) + break; + if (i == ARRAY_SIZE(exynos_drm_ipp_params_maps) || + map[i].size > size) + return -EINVAL; + + if (copy_from_user((void *)task + map[i].offset, params, + map[i].size)) + return -EFAULT; + + params += map[i].size; + size -= map[i].size; + } + + DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task); + return 0; +} + +static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf, + struct drm_file *filp) +{ + int ret = 0; + int i; + + /* basic checks */ + if (buf->buf.width == 0 || buf->buf.height == 0) + return -EINVAL; + buf->format = drm_format_info(buf->buf.fourcc); + for (i = 0; i < buf->format->num_planes; i++) { + unsigned int width = (i == 0) ? 
buf->buf.width : + DIV_ROUND_UP(buf->buf.width, buf->format->hsub); + + if (buf->buf.pitch[i] == 0) + buf->buf.pitch[i] = width * buf->format->cpp[i]; + if (buf->buf.pitch[i] < width * buf->format->cpp[i]) + return -EINVAL; + if (!buf->buf.gem_id[i]) + return -ENOENT; + } + + /* pitch for additional planes must match */ + if (buf->format->num_planes > 2 && + buf->buf.pitch[1] != buf->buf.pitch[2]) + return -EINVAL; + + /* get GEM buffers and check their size */ + for (i = 0; i < buf->format->num_planes; i++) { + unsigned int height = (i == 0) ? buf->buf.height : + DIV_ROUND_UP(buf->buf.height, buf->format->vsub); + unsigned long size = height * buf->buf.pitch[i]; + struct drm_gem_object *obj = drm_gem_object_lookup(filp, + buf->buf.gem_id[i]); + if (!obj) { + ret = -ENOENT; + goto gem_free; + } + buf->exynos_gem[i] = to_exynos_gem(obj); + + if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) { + i++; + ret = -EINVAL; + goto gem_free; + } + buf->dma_addr[i] = buf->exynos_gem[i]->dma_addr + + buf->buf.offset[i]; + } + + return 0; +gem_free: + while (i--) { + drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base); + buf->exynos_gem[i] = NULL; + } + return ret; +} + +static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf) +{ + int i; + + if (!buf->exynos_gem[0]) + return; + for (i = 0; i < buf->format->num_planes; i++) + drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base); +} + +static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) +{ + DRM_DEBUG_DRIVER("Freeing task %pK\n", task); + + exynos_drm_ipp_task_release_buf(&task->src); + exynos_drm_ipp_task_release_buf(&task->dst); + if (task->event) + drm_event_cancel_free(ipp->dev, &task->event->base); + kfree(task); +} + +struct drm_ipp_limit { + struct drm_exynos_ipp_limit_val h; + struct drm_exynos_ipp_limit_val v; +}; + +enum drm_ipp_size_id { + IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX +}; + +static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = { + [IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, + [IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, + DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, + [IPP_LIMIT_ROTATED] = { DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED, + DRM_EXYNOS_IPP_LIMIT_SIZE_AREA, + DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER }, +}; + +static inline void __limit_set_val(unsigned int *ptr, unsigned int val) +{ + if (!*ptr) + *ptr = val; +} + +static void __get_size_limit(const struct drm_exynos_ipp_limit *limits, + unsigned int num_limits, enum drm_ipp_size_id id, + struct drm_ipp_limit *res) +{ + const struct drm_exynos_ipp_limit *l = limits; + int i = 0; + + memset(res, 0, sizeof(*res)); + for (i = 0; limit_id_fallback[id][i]; i++) + for (l = limits; l - limits < num_limits; l++) { + if (((l->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) != + DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE) || + ((l->type & DRM_EXYNOS_IPP_LIMIT_SIZE_MASK) != + limit_id_fallback[id][i])) + continue; + __limit_set_val(&res->h.min, l->h.min); + __limit_set_val(&res->h.max, l->h.max); + __limit_set_val(&res->h.align, l->h.align); + __limit_set_val(&res->v.min, l->v.min); + __limit_set_val(&res->v.max, l->v.max); + __limit_set_val(&res->v.align, l->v.align); + } +} + +static inline bool __align_check(unsigned int val, unsigned int align) +{ + if (align && (val & (align - 1))) { + DRM_DEBUG_DRIVER("Value %d exceeds HW limits (align %d)\n", + val, align); + return false; + } + return true; +} + +static inline bool __size_limit_check(unsigned int val, + struct 
drm_exynos_ipp_limit_val *l) +{ + if ((l->min && val < l->min) || (l->max && val > l->max)) { + DRM_DEBUG_DRIVER("Value %d exceeds HW limits (min %d, max %d)\n", + val, l->min, l->max); + return false; + } + return __align_check(val, l->align); +} + +static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf, + const struct drm_exynos_ipp_limit *limits, unsigned int num_limits, + bool rotate, bool swap) +{ + enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA; + struct drm_ipp_limit l; + struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v; + + if (!limits) + return 0; + + __get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l); + if (!__size_limit_check(buf->buf.width, &l.h) || + !__size_limit_check(buf->buf.height, &l.v)) + return -EINVAL; + + if (swap) { + lv = &l.h; + lh = &l.v; + } + __get_size_limit(limits, num_limits, id, &l); + if (!__size_limit_check(buf->rect.w, lh) || + !__align_check(buf->rect.x, lh->align) || + !__size_limit_check(buf->rect.h, lv) || + !__align_check(buf->rect.y, lv->align)) + return -EINVAL; + + return 0; +} + +static inline bool __scale_limit_check(unsigned int src, unsigned int dst, + unsigned int min, unsigned int max) +{ + if ((max && (dst << 16) > src * max) || + (min && (dst << 16) < src * min)) { + DRM_DEBUG_DRIVER("Scale from %d to %d exceeds HW limits (ratio min %d.%05d, max %d.%05d)\n", + src, dst, + min >> 16, 100000 * (min & 0xffff) / (1 << 16), + max >> 16, 100000 * (max & 0xffff) / (1 << 16)); + return false; + } + return true; +} + +static int exynos_drm_ipp_check_scale_limits( + struct drm_exynos_ipp_task_rect *src, + struct drm_exynos_ipp_task_rect *dst, + const struct drm_exynos_ipp_limit *limits, + unsigned int num_limits, bool swap) +{ + const struct drm_exynos_ipp_limit_val *lh, *lv; + int dw, dh; + + for (; num_limits; limits++, num_limits--) + if ((limits->type & DRM_EXYNOS_IPP_LIMIT_TYPE_MASK) == + DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE) + break; + if (!num_limits) + return 0; + + lh = (!swap) ? &limits->h : &limits->v; + lv = (!swap) ? &limits->v : &limits->h; + dw = (!swap) ? dst->w : dst->h; + dh = (!swap) ? 
dst->h : dst->w; + + if (!__scale_limit_check(src->w, dw, lh->min, lh->max) || + !__scale_limit_check(src->h, dh, lv->min, lv->max)) + return -EINVAL; + + return 0; +} + +static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) +{ + struct exynos_drm_ipp *ipp = task->ipp; + const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt; + struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; + unsigned int rotation = task->transform.rotation; + int ret = 0; + bool swap = drm_rotation_90_or_270(rotation); + bool rotate = (rotation != DRM_MODE_ROTATE_0); + bool scale = false; + + DRM_DEBUG_DRIVER("Checking task %pK\n", task); + + if (src->rect.w == UINT_MAX) + src->rect.w = src->buf.width; + if (src->rect.h == UINT_MAX) + src->rect.h = src->buf.height; + if (dst->rect.w == UINT_MAX) + dst->rect.w = dst->buf.width; + if (dst->rect.h == UINT_MAX) + dst->rect.h = dst->buf.height; + + if (src->rect.x + src->rect.w > (src->buf.width) || + src->rect.y + src->rect.h > (src->buf.height) || + dst->rect.x + dst->rect.w > (dst->buf.width) || + dst->rect.y + dst->rect.h > (dst->buf.height)) { + DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n", + task); + return -EINVAL; + } + + if ((!swap && (src->rect.w != dst->rect.w || + src->rect.h != dst->rect.h)) || + (swap && (src->rect.w != dst->rect.h || + src->rect.h != dst->rect.w))) + scale = true; + + if ((!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CROP) && + (src->rect.x || src->rect.y || dst->rect.x || dst->rect.y)) || + (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_ROTATE) && rotate) || + (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) || + (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) && + src->buf.fourcc != dst->buf.fourcc)) { + DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task); + return -EINVAL; + } + + src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier, + DRM_EXYNOS_IPP_FORMAT_SOURCE); + if (!src_fmt) { + DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task); + return -EINVAL; + } + ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits, + src_fmt->num_limits, + rotate, false); + if (ret) + return ret; + ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, + src_fmt->limits, + src_fmt->num_limits, swap); + if (ret) + return ret; + + dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier, + DRM_EXYNOS_IPP_FORMAT_DESTINATION); + if (!dst_fmt) { + DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task); + return -EINVAL; + } + ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits, + dst_fmt->num_limits, + false, swap); + if (ret) + return ret; + ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect, + dst_fmt->limits, + dst_fmt->num_limits, swap); + if (ret) + return ret; + + DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task); + + return ret; +} + +static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task, + struct drm_file *filp) +{ + struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; + int ret = 0; + + DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task); + + ret = exynos_drm_ipp_task_setup_buffer(src, filp); + if (ret) { + DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task); + return ret; + } + ret = exynos_drm_ipp_task_setup_buffer(dst, filp); + if (ret) { + DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task); + return ret; + } + + DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task); + + return ret; +} + + +static int 
exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task, + struct drm_file *file_priv, uint64_t user_data) +{ + struct drm_pending_exynos_ipp_event *e = NULL; + int ret; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->event.base.type = DRM_EXYNOS_IPP_EVENT; + e->event.base.length = sizeof(e->event); + e->event.user_data = user_data; + + ret = drm_event_reserve_init(task->dev, file_priv, &e->base, + &e->event.base); + if (ret) + goto free; + + task->event = e; + return 0; +free: + kfree(e); + return ret; +} + +static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task) +{ + struct timespec64 now; + + ktime_get_ts64(&now); + task->event->event.tv_sec = now.tv_sec; + task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC; + task->event->event.sequence = atomic_inc_return(&task->ipp->sequence); + + drm_send_event(task->dev, &task->event->base); +} + +static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task) +{ + int ret = task->ret; + + if (ret == 0 && task->event) { + exynos_drm_ipp_event_send(task); + /* ensure event won't be canceled on task free */ + task->event = NULL; + } + + exynos_drm_ipp_task_free(task->ipp, task); + return ret; +} + +static void exynos_drm_ipp_cleanup_work(struct work_struct *work) +{ + struct exynos_drm_ipp_task *task = container_of(work, + struct exynos_drm_ipp_task, cleanup_work); + + exynos_drm_ipp_task_cleanup(task); +} + +static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp); + +/** + * exynos_drm_ipp_task_done - finish given task and set return code + * @task: ipp task to finish + * @ret: error code or 0 if operation has been performed successfully + */ +void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret) +{ + struct exynos_drm_ipp *ipp = task->ipp; + unsigned long flags; + + DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret); + + spin_lock_irqsave(&ipp->lock, flags); + if (ipp->task == task) + ipp->task = NULL; + task->flags |= DRM_EXYNOS_IPP_TASK_DONE; + task->ret = ret; + spin_unlock_irqrestore(&ipp->lock, flags); + + exynos_drm_ipp_next_task(ipp); + wake_up(&ipp->done_wq); + + if (task->flags & DRM_EXYNOS_IPP_TASK_ASYNC) { + INIT_WORK(&task->cleanup_work, exynos_drm_ipp_cleanup_work); + schedule_work(&task->cleanup_work); + } +} + +static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp) +{ + struct exynos_drm_ipp_task *task; + unsigned long flags; + int ret; + + DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id); + + spin_lock_irqsave(&ipp->lock, flags); + + if (ipp->task || list_empty(&ipp->todo_list)) { + spin_unlock_irqrestore(&ipp->lock, flags); + return; + } + + task = list_first_entry(&ipp->todo_list, struct exynos_drm_ipp_task, + head); + list_del_init(&task->head); + ipp->task = task; + + spin_unlock_irqrestore(&ipp->lock, flags); + + DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task); + + ret = ipp->funcs->commit(ipp, task); + if (ret) + exynos_drm_ipp_task_done(task, ret); +} + +static void exynos_drm_ipp_schedule_task(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) +{ + unsigned long flags; + + spin_lock_irqsave(&ipp->lock, flags); + list_add(&task->head, &ipp->todo_list); + spin_unlock_irqrestore(&ipp->lock, flags); + + exynos_drm_ipp_next_task(ipp); +} + +static void exynos_drm_ipp_task_abort(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) +{ + unsigned long flags; + + spin_lock_irqsave(&ipp->lock, flags); + if (task->flags & DRM_EXYNOS_IPP_TASK_DONE) { + /* 
already completed task */ + exynos_drm_ipp_task_cleanup(task); + } else if (ipp->task != task) { + /* task has not been scheduled for execution yet */ + list_del_init(&task->head); + exynos_drm_ipp_task_cleanup(task); + } else { + /* + * currently processed task, call abort() and perform + * cleanup with async worker + */ + task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC; + spin_unlock_irqrestore(&ipp->lock, flags); + if (ipp->funcs->abort) + ipp->funcs->abort(ipp, task); + return; + } + spin_unlock_irqrestore(&ipp->lock, flags); +} + +/** + * exynos_drm_ipp_commit_ioctl - perform image processing operation + * @dev: DRM device + * @data: ioctl data + * @file_priv: DRM file info + * + * Construct a ipp task from the set of properties provided from the user + * and try to schedule it to framebuffer processor hardware. + * + * Called by the user via ioctl. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_exynos_ioctl_ipp_commit *arg = data; + struct exynos_drm_ipp *ipp; + struct exynos_drm_ipp_task *task; + int ret = 0; + + if ((arg->flags & ~DRM_EXYNOS_IPP_FLAGS) || arg->reserved) + return -EINVAL; + + /* can't test and expect an event at the same time */ + if ((arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) && + (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT)) + return -EINVAL; + + ipp = __ipp_get(arg->ipp_id); + if (!ipp) + return -ENOENT; + + task = exynos_drm_ipp_task_alloc(ipp); + if (!task) + return -ENOMEM; + + ret = exynos_drm_ipp_task_set(task, arg); + if (ret) + goto free; + + ret = exynos_drm_ipp_task_check(task); + if (ret) + goto free; + + ret = exynos_drm_ipp_task_setup_buffers(task, file_priv); + if (ret || arg->flags & DRM_EXYNOS_IPP_FLAG_TEST_ONLY) + goto free; + + if (arg->flags & DRM_EXYNOS_IPP_FLAG_EVENT) { + ret = exynos_drm_ipp_event_create(task, file_priv, + arg->user_data); + if (ret) + goto free; + } + + /* + * Queue task for processing on the hardware. task object will be + * then freed after exynos_drm_ipp_task_done() + */ + if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) { + DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n", + ipp->id, task); + + task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC; + exynos_drm_ipp_schedule_task(task->ipp, task); + ret = 0; + } else { + DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id, + task); + exynos_drm_ipp_schedule_task(ipp, task); + ret = wait_event_interruptible(ipp->done_wq, + task->flags & DRM_EXYNOS_IPP_TASK_DONE); + if (ret) + exynos_drm_ipp_task_abort(ipp, task); + else + ret = exynos_drm_ipp_task_cleanup(task); + } + return ret; +free: + exynos_drm_ipp_task_free(ipp, task); + + return ret; +} diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h new file mode 100644 index 000000000000..0b27d4a9bf94 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2017 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef _EXYNOS_DRM_IPP_H_ +#define _EXYNOS_DRM_IPP_H_ + +#include + +struct exynos_drm_ipp; +struct exynos_drm_ipp_task; + +/** + * struct exynos_drm_ipp_funcs - exynos_drm_ipp control functions + */ +struct exynos_drm_ipp_funcs { + /** + * @commit: + * + * This is the main entry point to start framebuffer processing + * in the hardware. The exynos_drm_ipp_task has been already validated. + * This function must not wait until the device finishes processing. + * When the driver finishes processing, it has to call + * exynos_exynos_drm_ipp_task_done() function. + * + * RETURNS: + * + * 0 on success or negative error codes in case of failure. + */ + int (*commit)(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task); + + /** + * @abort: + * + * Informs the driver that it has to abort the currently running + * task as soon as possible (i.e. as soon as it can stop the device + * safely), even if the task would not have been finished by then. + * After the driver performs the necessary steps, it has to call + * exynos_drm_ipp_task_done() (as if the task ended normally). + * This function does not have to (and will usually not) wait + * until the device enters a state when it can be stopped. + */ + void (*abort)(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task); +}; + +/** + * struct exynos_drm_ipp - central picture processor module structure + */ +struct exynos_drm_ipp { + struct drm_device *dev; + struct list_head head; + unsigned int id; + + const char *name; + const struct exynos_drm_ipp_funcs *funcs; + unsigned int capabilities; + const struct exynos_drm_ipp_formats *formats; + unsigned int num_formats; + atomic_t sequence; + + spinlock_t lock; + struct exynos_drm_ipp_task *task; + struct list_head todo_list; + wait_queue_head_t done_wq; +}; + +struct exynos_drm_ipp_buffer { + struct drm_exynos_ipp_task_buffer buf; + struct drm_exynos_ipp_task_rect rect; + + struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; + const struct drm_format_info *format; + dma_addr_t dma_addr[MAX_FB_BUFFER]; +}; + +/** + * struct exynos_drm_ipp_task - a structure describing transformation that + * has to be performed by the picture processor hardware module + */ +struct exynos_drm_ipp_task { + struct drm_device *dev; + struct exynos_drm_ipp *ipp; + struct list_head head; + + struct exynos_drm_ipp_buffer src; + struct exynos_drm_ipp_buffer dst; + + struct drm_exynos_ipp_task_transform transform; + struct drm_exynos_ipp_task_alpha alpha; + + struct work_struct cleanup_work; + unsigned int flags; + int ret; + + struct drm_pending_exynos_ipp_event *event; +}; + +#define DRM_EXYNOS_IPP_TASK_DONE (1 << 0) +#define DRM_EXYNOS_IPP_TASK_ASYNC (1 << 1) + +struct exynos_drm_ipp_formats { + uint32_t fourcc; + uint32_t type; + uint64_t modifier; + const struct drm_exynos_ipp_limit *limits; + unsigned int num_limits; +}; + +/* helper macros to set exynos_drm_ipp_formats structure and limits*/ +#define IPP_SRCDST_MFORMAT(f, m, l) \ + .fourcc = DRM_FORMAT_##f, .modifier = m, .limits = l, \ + .num_limits = ARRAY_SIZE(l), \ + .type = (DRM_EXYNOS_IPP_FORMAT_SOURCE | \ + DRM_EXYNOS_IPP_FORMAT_DESTINATION) + +#define IPP_SRCDST_FORMAT(f, l) IPP_SRCDST_MFORMAT(f, 0, l) + +#define IPP_SIZE_LIMIT(l, val...) \ + .type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE | \ + DRM_EXYNOS_IPP_LIMIT_SIZE_##l), val + +#define IPP_SCALE_LIMIT(val...) 
\ + .type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE), val + +int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp, + const struct exynos_drm_ipp_funcs *funcs, unsigned int caps, + const struct exynos_drm_ipp_formats *formats, + unsigned int num_formats, const char *name); +void exynos_drm_ipp_unregister(struct drm_device *dev, + struct exynos_drm_ipp *ipp); + +void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret); + +#ifdef CONFIG_DRM_EXYNOS_IPP +int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv); +#else +static inline int exynos_drm_ipp_get_res_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + struct drm_exynos_ioctl_ipp_get_res *resp = data; + + resp->count_ipps = 0; + return 0; +} +static inline int exynos_drm_ipp_get_caps_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + return -ENODEV; +} +static inline int exynos_drm_ipp_get_limits_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + return -ENODEV; +} +static inline int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, + void *data, struct drm_file *file_priv) +{ + return -ENODEV; +} +#endif +#endif diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h index 4a54305120e0..3e59b8382dd8 100644 --- a/include/uapi/drm/exynos_drm.h +++ b/include/uapi/drm/exynos_drm.h @@ -135,6 +135,219 @@ struct drm_exynos_g2d_exec { __u64 async; }; +/* Exynos DRM IPP v2 API */ + +/** + * Enumerate available IPP hardware modules. + * + * @count_ipps: size of ipp_id array / number of ipp modules (set by driver) + * @reserved: padding + * @ipp_id_ptr: pointer to ipp_id array or NULL + */ +struct drm_exynos_ioctl_ipp_get_res { + __u32 count_ipps; + __u32 reserved; + __u64 ipp_id_ptr; +}; + +enum drm_exynos_ipp_format_type { + DRM_EXYNOS_IPP_FORMAT_SOURCE = 0x01, + DRM_EXYNOS_IPP_FORMAT_DESTINATION = 0x02, +}; + +struct drm_exynos_ipp_format { + __u32 fourcc; + __u32 type; + __u64 modifier; +}; + +enum drm_exynos_ipp_capability { + DRM_EXYNOS_IPP_CAP_CROP = 0x01, + DRM_EXYNOS_IPP_CAP_ROTATE = 0x02, + DRM_EXYNOS_IPP_CAP_SCALE = 0x04, + DRM_EXYNOS_IPP_CAP_CONVERT = 0x08, +}; + +/** + * Get IPP hardware capabilities and supported image formats. 
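+ *
+ * Userspace typically calls this ioctl twice: first with @formats_ptr set
+ * to NULL to query the number of supported formats, then again with an
+ * array of at least @formats_count entries to retrieve them.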
+ * + * @ipp_id: id of IPP module to query + * @capabilities: bitmask of drm_exynos_ipp_capability (set by driver) + * @reserved: padding + * @formats_count: size of formats array (in entries) / number of filled + * formats (set by driver) + * @formats_ptr: pointer to formats array or NULL + */ +struct drm_exynos_ioctl_ipp_get_caps { + __u32 ipp_id; + __u32 capabilities; + __u32 reserved; + __u32 formats_count; + __u64 formats_ptr; +}; + +enum drm_exynos_ipp_limit_type { + /* size (horizontal/vertial) limits, in pixels (min, max, alignment) */ + DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE = 0x0001, + /* scale ratio (horizonta/vertial), 16.16 fixed point (min, max) */ + DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE = 0x0002, + + /* image buffer area */ + DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER = 0x0001 << 16, + /* src/dst rectangle area */ + DRM_EXYNOS_IPP_LIMIT_SIZE_AREA = 0x0002 << 16, + /* src/dst rectangle area when rotation enabled */ + DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED = 0x0003 << 16, + + DRM_EXYNOS_IPP_LIMIT_TYPE_MASK = 0x000f, + DRM_EXYNOS_IPP_LIMIT_SIZE_MASK = 0x000f << 16, +}; + +struct drm_exynos_ipp_limit_val { + __u32 min; + __u32 max; + __u32 align; + __u32 reserved; +}; + +/** + * IPP module limitation. + * + * @type: limit type (see drm_exynos_ipp_limit_type enum) + * @reserved: padding + * @h: horizontal limits + * @v: vertical limits + */ +struct drm_exynos_ipp_limit { + __u32 type; + __u32 reserved; + struct drm_exynos_ipp_limit_val h; + struct drm_exynos_ipp_limit_val v; +}; + +/** + * Get IPP limits for given image format. + * + * @ipp_id: id of IPP module to query + * @fourcc: image format code (see DRM_FORMAT_* in drm_fourcc.h) + * @modifier: image format modifier (see DRM_FORMAT_MOD_* in drm_fourcc.h) + * @type: source/destination identifier (drm_exynos_ipp_format_flag enum) + * @limits_count: size of limits array (in entries) / number of filled entries + * (set by driver) + * @limits_ptr: pointer to limits array or NULL + */ +struct drm_exynos_ioctl_ipp_get_limits { + __u32 ipp_id; + __u32 fourcc; + __u64 modifier; + __u32 type; + __u32 limits_count; + __u64 limits_ptr; +}; + +enum drm_exynos_ipp_task_id { + /* buffer described by struct drm_exynos_ipp_task_buffer */ + DRM_EXYNOS_IPP_TASK_BUFFER = 0x0001, + /* rectangle described by struct drm_exynos_ipp_task_rect */ + DRM_EXYNOS_IPP_TASK_RECTANGLE = 0x0002, + /* transformation described by struct drm_exynos_ipp_task_transform */ + DRM_EXYNOS_IPP_TASK_TRANSFORM = 0x0003, + /* alpha configuration described by struct drm_exynos_ipp_task_alpha */ + DRM_EXYNOS_IPP_TASK_ALPHA = 0x0004, + + /* source image data (for buffer and rectangle chunks) */ + DRM_EXYNOS_IPP_TASK_TYPE_SOURCE = 0x0001 << 16, + /* destination image data (for buffer and rectangle chunks) */ + DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION = 0x0002 << 16, +}; + +/** + * Memory buffer with image data. + * + * @id: must be DRM_EXYNOS_IPP_TASK_BUFFER + * other parameters are same as for AddFB2 generic DRM ioctl + */ +struct drm_exynos_ipp_task_buffer { + __u32 id; + __u32 fourcc; + __u32 width, height; + __u32 gem_id[4]; + __u32 offset[4]; + __u32 pitch[4]; + __u64 modifier; +}; + +/** + * Rectangle for processing. + * + * @id: must be DRM_EXYNOS_IPP_TASK_RECTANGLE + * @reserved: padding + * @x,@y: left corner in pixels + * @w,@h: width/height in pixels + */ +struct drm_exynos_ipp_task_rect { + __u32 id; + __u32 reserved; + __u32 x; + __u32 y; + __u32 w; + __u32 h; +}; + +/** + * Image tranformation description. 
+ * + * @id: must be DRM_EXYNOS_IPP_TASK_TRANSFORM + * @rotation: DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* values + */ +struct drm_exynos_ipp_task_transform { + __u32 id; + __u32 rotation; +}; + +/** + * Image global alpha configuration for formats without alpha values. + * + * @id: must be DRM_EXYNOS_IPP_TASK_ALPHA + * @value: global alpha value (0-255) + */ +struct drm_exynos_ipp_task_alpha { + __u32 id; + __u32 value; +}; + +enum drm_exynos_ipp_flag { + /* generate DRM event after processing */ + DRM_EXYNOS_IPP_FLAG_EVENT = 0x01, + /* dry run, only check task parameters */ + DRM_EXYNOS_IPP_FLAG_TEST_ONLY = 0x02, + /* non-blocking processing */ + DRM_EXYNOS_IPP_FLAG_NONBLOCK = 0x04, +}; + +#define DRM_EXYNOS_IPP_FLAGS (DRM_EXYNOS_IPP_FLAG_EVENT |\ + DRM_EXYNOS_IPP_FLAG_TEST_ONLY | DRM_EXYNOS_IPP_FLAG_NONBLOCK) + +/** + * Perform image processing described by array of drm_exynos_ipp_task_* + * structures (parameters array). + * + * @ipp_id: id of IPP module to run the task + * @flags: bitmask of drm_exynos_ipp_flag values + * @reserved: padding + * @params_size: size of parameters array (in bytes) + * @params_ptr: pointer to parameters array or NULL + * @user_data: (optional) data for drm event + */ +struct drm_exynos_ioctl_ipp_commit { + __u32 ipp_id; + __u32 flags; + __u32 reserved; + __u32 params_size; + __u64 params_ptr; + __u64 user_data; +}; + #define DRM_EXYNOS_GEM_CREATE 0x00 #define DRM_EXYNOS_GEM_MAP 0x01 /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */ @@ -147,6 +360,11 @@ struct drm_exynos_g2d_exec { #define DRM_EXYNOS_G2D_EXEC 0x22 /* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */ +/* IPP - Image Post Processing */ +#define DRM_EXYNOS_IPP_GET_RESOURCES 0x40 +#define DRM_EXYNOS_IPP_GET_CAPS 0x41 +#define DRM_EXYNOS_IPP_GET_LIMITS 0x42 +#define DRM_EXYNOS_IPP_COMMIT 0x43 #define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \ DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create) @@ -165,8 +383,20 @@ struct drm_exynos_g2d_exec { #define DRM_IOCTL_EXYNOS_G2D_EXEC DRM_IOWR(DRM_COMMAND_BASE + \ DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec) +#define DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES DRM_IOWR(DRM_COMMAND_BASE + \ + DRM_EXYNOS_IPP_GET_RESOURCES, \ + struct drm_exynos_ioctl_ipp_get_res) +#define DRM_IOCTL_EXYNOS_IPP_GET_CAPS DRM_IOWR(DRM_COMMAND_BASE + \ + DRM_EXYNOS_IPP_GET_CAPS, struct drm_exynos_ioctl_ipp_get_caps) +#define DRM_IOCTL_EXYNOS_IPP_GET_LIMITS DRM_IOWR(DRM_COMMAND_BASE + \ + DRM_EXYNOS_IPP_GET_LIMITS, \ + struct drm_exynos_ioctl_ipp_get_limits) +#define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \ + DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit) + /* EXYNOS specific events */ #define DRM_EXYNOS_G2D_EVENT 0x80000000 +#define DRM_EXYNOS_IPP_EVENT 0x80000002 struct drm_exynos_g2d_event { struct drm_event base; @@ -177,6 +407,16 @@ struct drm_exynos_g2d_event { __u32 reserved; }; +struct drm_exynos_ipp_event { + struct drm_event base; + __u64 user_data; + __u32 tv_sec; + __u32 tv_usec; + __u32 ipp_id; + __u32 sequence; + __u64 reserved; +}; + #if defined(__cplusplus) } #endif From d8cb9eeaa79fce028982589da8696df6bb10b903 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 9 May 2018 10:59:23 +0200 Subject: [PATCH 13/17] drm/exynos: rotator: Convert driver to IPP v2 core API This patch adapts Exynos DRM rotator driver to new IPP v2 core API. The side effect of this conversion is a switch to driver component API to register properly in the Exynos DRM core. 
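For reference, the conversion pattern used here (and in the GScaler patch that
follows) reduces to three steps: embed a struct exynos_drm_ipp in the driver
context, register it with the IPP core from the component bind callback, and
report task completion from the interrupt handler. The sketch below is
illustrative only: the foo_* names and the single XRGB8888 format entry are
placeholder assumptions, while the exynos_drm_ipp_* calls, the IPP_* macros and
the DRM_EXYNOS_IPP_CAP_* flags are the ones introduced by this series (iommu
attach, component_add() and error handling are omitted for brevity).

  /* Illustrative skeleton of an IPP v2 driver; foo_* names are placeholders. */
  #include <linux/component.h>
  #include <linux/interrupt.h>
  #include <linux/pm_runtime.h>
  #include <linux/sizes.h>
  #include <drm/drm_fourcc.h>

  #include "exynos_drm_drv.h"
  #include "exynos_drm_ipp.h"

  struct foo_context {
          struct exynos_drm_ipp ipp;              /* embedded IPP core object */
          struct device *dev;
          struct exynos_drm_ipp_task *task;       /* task currently on the hardware */
  };

  static const struct drm_exynos_ipp_limit foo_limits[] = {
          { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) },
  };

  static const struct exynos_drm_ipp_formats foo_formats[] = {
          { IPP_SRCDST_FORMAT(XRGB8888, foo_limits) },
  };

  static int foo_commit(struct exynos_drm_ipp *ipp,
                        struct exynos_drm_ipp_task *task)
  {
          struct foo_context *ctx = container_of(ipp, struct foo_context, ipp);

          pm_runtime_get_sync(ctx->dev);
          ctx->task = task;
          /* program src/dst buffers and the transform, then start the hardware */
          return 0;
  }

  static const struct exynos_drm_ipp_funcs foo_ipp_funcs = {
          .commit = foo_commit,
  };

  static irqreturn_t foo_irq_handler(int irq, void *arg)
  {
          struct foo_context *ctx = arg;
          struct exynos_drm_ipp_task *task = ctx->task;

          if (task) {
                  ctx->task = NULL;
                  pm_runtime_mark_last_busy(ctx->dev);
                  pm_runtime_put_autosuspend(ctx->dev);
                  /* hand the finished (or failed) task back to the IPP core */
                  exynos_drm_ipp_task_done(task, 0);
          }
          return IRQ_HANDLED;
  }

  static int foo_bind(struct device *dev, struct device *master, void *data)
  {
          struct foo_context *ctx = dev_get_drvdata(dev);
          struct drm_device *drm_dev = data;

          return exynos_drm_ipp_register(drm_dev, &ctx->ipp, &foo_ipp_funcs,
                                         DRM_EXYNOS_IPP_CAP_CROP |
                                         DRM_EXYNOS_IPP_CAP_ROTATE,
                                         foo_formats, ARRAY_SIZE(foo_formats),
                                         "foo");
  }

The IPP core performs ioctl handling, task validation and buffer setup before
calling .commit, so the driver callbacks only deal with register programming.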
Signed-off-by: Marek Szyprowski Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/Kconfig | 2 +- drivers/gpu/drm/exynos/exynos_drm_drv.c | 1 + drivers/gpu/drm/exynos/exynos_drm_rotator.c | 772 +++++--------------- 3 files changed, 197 insertions(+), 578 deletions(-) diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 9e914655c430..63a27c14b133 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -106,7 +106,7 @@ config DRM_EXYNOS_FIMC config DRM_EXYNOS_ROTATOR bool "Rotator" - depends on BROKEN + select DRM_EXYNOS_IPP help Choose this option if you want to use Exynos Rotator for DRM. diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 37c0db759674..537a588ef370 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -263,6 +263,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = { DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC), }, { DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR), + DRM_COMPONENT_DRIVER }, { DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC), }, { diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 79282a820ecc..1a76dd3d52e1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -22,29 +23,18 @@ #include #include "regs-rotator.h" #include "exynos_drm_drv.h" +#include "exynos_drm_iommu.h" #include "exynos_drm_ipp.h" /* * Rotator supports image crop/rotator and input/output DMA operations. * input DMA reads image data from the memory. * output DMA writes image data to memory. - * - * M2M operation : supports crop/scale/rotation/csc so on. - * Memory ----> Rotator H/W ----> Memory. */ -/* - * TODO - * 1. check suspend/resume api if needed. - * 2. need to check use case platform_device_id. - * 3. check src/dst size with, height. - * 4. need to add supported list in prop_list. - */ +#define ROTATOR_AUTOSUSPEND_DELAY 2000 -#define get_rot_context(dev) platform_get_drvdata(to_platform_device(dev)) -#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\ - struct rot_context, ippdrv); -#define rot_read(offset) readl(rot->regs + (offset)) +#define rot_read(offset) readl(rot->regs + (offset)) #define rot_write(cfg, offset) writel(cfg, rot->regs + (offset)) enum rot_irq_status { @@ -52,54 +42,28 @@ enum rot_irq_status { ROT_IRQ_STATUS_ILLEGAL = 9, }; -/* - * A structure of limitation. - * - * @min_w: minimum width. - * @min_h: minimum height. - * @max_w: maximum width. - * @max_h: maximum height. - * @align: align size. - */ -struct rot_limit { - u32 min_w; - u32 min_h; - u32 max_w; - u32 max_h; - u32 align; -}; - -/* - * A structure of limitation table. - * - * @ycbcr420_2p: case of YUV. - * @rgb888: case of RGB. - */ -struct rot_limit_table { - struct rot_limit ycbcr420_2p; - struct rot_limit rgb888; +struct rot_variant { + const struct exynos_drm_ipp_formats *formats; + unsigned int num_formats; }; /* * A structure of rotator context. * @ippdrv: prepare initialization using ippdrv. - * @regs_res: register resources. * @regs: memory mapped io registers. * @clock: rotator gate clock. * @limit_tbl: limitation of rotator. * @irq: irq number. - * @cur_buf_id: current operation buffer id. - * @suspended: suspended state. 
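+ * @ipp: exynos_drm_ipp core object embedded in this driver context
+ * @drm_dev: DRM device the rotator is bound to
+ * @dev: rotator platform device
+ * @formats: supported image formats and their hardware limits
+ * @num_formats: number of entries in @formats
+ * @task: IPP task currently being processed by the hardware, if any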
*/ struct rot_context { - struct exynos_drm_ippdrv ippdrv; - struct resource *regs_res; + struct exynos_drm_ipp ipp; + struct drm_device *drm_dev; + struct device *dev; void __iomem *regs; struct clk *clock; - struct rot_limit_table *limit_tbl; - int irq; - int cur_buf_id[EXYNOS_DRM_OPS_MAX]; - bool suspended; + const struct exynos_drm_ipp_formats *formats; + unsigned int num_formats; + struct exynos_drm_ipp_task *task; }; static void rotator_reg_set_irq(struct rot_context *rot, bool enable) @@ -114,15 +78,6 @@ static void rotator_reg_set_irq(struct rot_context *rot, bool enable) rot_write(val, ROT_CONFIG); } -static u32 rotator_reg_get_fmt(struct rot_context *rot) -{ - u32 val = rot_read(ROT_CONTROL); - - val &= ROT_CONTROL_FMT_MASK; - - return val; -} - static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot) { u32 val = rot_read(ROT_STATUS); @@ -138,9 +93,6 @@ static enum rot_irq_status rotator_reg_get_irq_status(struct rot_context *rot) static irqreturn_t rotator_irq_handler(int irq, void *arg) { struct rot_context *rot = arg; - struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_event_work *event_work = c_node->event_work; enum rot_irq_status irq_status; u32 val; @@ -152,56 +104,21 @@ static irqreturn_t rotator_irq_handler(int irq, void *arg) val |= ROT_STATUS_IRQ_PENDING((u32)irq_status); rot_write(val, ROT_STATUS); - if (irq_status == ROT_IRQ_STATUS_COMPLETE) { - event_work->ippdrv = ippdrv; - event_work->buf_id[EXYNOS_DRM_OPS_DST] = - rot->cur_buf_id[EXYNOS_DRM_OPS_DST]; - queue_work(ippdrv->event_workq, &event_work->work); - } else { - DRM_ERROR("the SFR is set illegally\n"); + if (rot->task) { + struct exynos_drm_ipp_task *task = rot->task; + + rot->task = NULL; + pm_runtime_mark_last_busy(rot->dev); + pm_runtime_put_autosuspend(rot->dev); + exynos_drm_ipp_task_done(task, + irq_status == ROT_IRQ_STATUS_COMPLETE ? 
0 : -EINVAL); } return IRQ_HANDLED; } -static void rotator_align_size(struct rot_context *rot, u32 fmt, u32 *hsize, - u32 *vsize) +static void rotator_src_set_fmt(struct rot_context *rot, u32 fmt) { - struct rot_limit_table *limit_tbl = rot->limit_tbl; - struct rot_limit *limit; - u32 mask, val; - - /* Get size limit */ - if (fmt == ROT_CONTROL_FMT_RGB888) - limit = &limit_tbl->rgb888; - else - limit = &limit_tbl->ycbcr420_2p; - - /* Get mask for rounding to nearest aligned val */ - mask = ~((1 << limit->align) - 1); - - /* Set aligned width */ - val = ROT_ALIGN(*hsize, limit->align, mask); - if (val < limit->min_w) - *hsize = ROT_MIN(limit->min_w, mask); - else if (val > limit->max_w) - *hsize = ROT_MAX(limit->max_w, mask); - else - *hsize = val; - - /* Set aligned height */ - val = ROT_ALIGN(*vsize, limit->align, mask); - if (val < limit->min_h) - *vsize = ROT_MIN(limit->min_h, mask); - else if (val > limit->max_h) - *vsize = ROT_MAX(limit->max_h, mask); - else - *vsize = val; -} - -static int rotator_src_set_fmt(struct device *dev, u32 fmt) -{ - struct rot_context *rot = dev_get_drvdata(dev); u32 val; val = rot_read(ROT_CONTROL); @@ -214,515 +131,176 @@ static int rotator_src_set_fmt(struct device *dev, u32 fmt) case DRM_FORMAT_XRGB8888: val |= ROT_CONTROL_FMT_RGB888; break; - default: - DRM_ERROR("invalid image format\n"); - return -EINVAL; } rot_write(val, ROT_CONTROL); - - return 0; } -static inline bool rotator_check_reg_fmt(u32 fmt) +static void rotator_src_set_buf(struct rot_context *rot, + struct exynos_drm_ipp_buffer *buf) { - if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) || - (fmt == ROT_CONTROL_FMT_RGB888)) - return true; - - return false; -} - -static int rotator_src_set_size(struct device *dev, int swap, - struct drm_exynos_pos *pos, - struct drm_exynos_sz *sz) -{ - struct rot_context *rot = dev_get_drvdata(dev); - u32 fmt, hsize, vsize; u32 val; - /* Get format */ - fmt = rotator_reg_get_fmt(rot); - if (!rotator_check_reg_fmt(fmt)) { - DRM_ERROR("invalid format.\n"); - return -EINVAL; - } - - /* Align buffer size */ - hsize = sz->hsize; - vsize = sz->vsize; - rotator_align_size(rot, fmt, &hsize, &vsize); - /* Set buffer size configuration */ - val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize); + val = ROT_SET_BUF_SIZE_H(buf->buf.height) | + ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]); rot_write(val, ROT_SRC_BUF_SIZE); /* Set crop image position configuration */ - val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x); + val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x); rot_write(val, ROT_SRC_CROP_POS); - val = ROT_SRC_CROP_SIZE_H(pos->h) | ROT_SRC_CROP_SIZE_W(pos->w); + val = ROT_SRC_CROP_SIZE_H(buf->rect.h) | + ROT_SRC_CROP_SIZE_W(buf->rect.w); rot_write(val, ROT_SRC_CROP_SIZE); - return 0; + /* Set buffer DMA address */ + rot_write(buf->dma_addr[0], ROT_SRC_BUF_ADDR(0)); + rot_write(buf->dma_addr[1], ROT_SRC_BUF_ADDR(1)); } -static int rotator_src_set_addr(struct device *dev, - struct drm_exynos_ipp_buf_info *buf_info, - u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) +static void rotator_dst_set_transf(struct rot_context *rot, + unsigned int rotation) { - struct rot_context *rot = dev_get_drvdata(dev); - dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX]; - u32 val, fmt, hsize, vsize; - int i; - - /* Set current buf_id */ - rot->cur_buf_id[EXYNOS_DRM_OPS_SRC] = buf_id; - - switch (buf_type) { - case IPP_BUF_ENQUEUE: - /* Set address configuration */ - for_each_ipp_planar(i) - addr[i] = buf_info->base[i]; - - /* Get format */ - fmt = 
rotator_reg_get_fmt(rot); - if (!rotator_check_reg_fmt(fmt)) { - DRM_ERROR("invalid format.\n"); - return -EINVAL; - } - - /* Re-set cb planar for NV12 format */ - if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) && - !addr[EXYNOS_DRM_PLANAR_CB]) { - - val = rot_read(ROT_SRC_BUF_SIZE); - hsize = ROT_GET_BUF_SIZE_W(val); - vsize = ROT_GET_BUF_SIZE_H(val); - - /* Set cb planar */ - addr[EXYNOS_DRM_PLANAR_CB] = - addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize; - } - - for_each_ipp_planar(i) - rot_write(addr[i], ROT_SRC_BUF_ADDR(i)); - break; - case IPP_BUF_DEQUEUE: - for_each_ipp_planar(i) - rot_write(0x0, ROT_SRC_BUF_ADDR(i)); - break; - default: - /* Nothing to do */ - break; - } - - return 0; -} - -static int rotator_dst_set_transf(struct device *dev, - enum drm_exynos_degree degree, - enum drm_exynos_flip flip, bool *swap) -{ - struct rot_context *rot = dev_get_drvdata(dev); u32 val; /* Set transform configuration */ val = rot_read(ROT_CONTROL); val &= ~ROT_CONTROL_FLIP_MASK; - switch (flip) { - case EXYNOS_DRM_FLIP_VERTICAL: - val |= ROT_CONTROL_FLIP_VERTICAL; - break; - case EXYNOS_DRM_FLIP_HORIZONTAL: + if (rotation & DRM_MODE_REFLECT_X) val |= ROT_CONTROL_FLIP_HORIZONTAL; - break; - default: - /* Flip None */ - break; - } + if (rotation & DRM_MODE_REFLECT_Y) + val |= ROT_CONTROL_FLIP_VERTICAL; val &= ~ROT_CONTROL_ROT_MASK; - switch (degree) { - case EXYNOS_DRM_DEGREE_90: + if (rotation & DRM_MODE_ROTATE_90) val |= ROT_CONTROL_ROT_90; - break; - case EXYNOS_DRM_DEGREE_180: + else if (rotation & DRM_MODE_ROTATE_180) val |= ROT_CONTROL_ROT_180; - break; - case EXYNOS_DRM_DEGREE_270: + else if (rotation & DRM_MODE_ROTATE_270) val |= ROT_CONTROL_ROT_270; - break; - default: - /* Rotation 0 Degree */ - break; - } rot_write(val, ROT_CONTROL); - - /* Check degree for setting buffer size swap */ - if ((degree == EXYNOS_DRM_DEGREE_90) || - (degree == EXYNOS_DRM_DEGREE_270)) - *swap = true; - else - *swap = false; - - return 0; } -static int rotator_dst_set_size(struct device *dev, int swap, - struct drm_exynos_pos *pos, - struct drm_exynos_sz *sz) +static void rotator_dst_set_buf(struct rot_context *rot, + struct exynos_drm_ipp_buffer *buf) { - struct rot_context *rot = dev_get_drvdata(dev); - u32 val, fmt, hsize, vsize; - - /* Get format */ - fmt = rotator_reg_get_fmt(rot); - if (!rotator_check_reg_fmt(fmt)) { - DRM_ERROR("invalid format.\n"); - return -EINVAL; - } - - /* Align buffer size */ - hsize = sz->hsize; - vsize = sz->vsize; - rotator_align_size(rot, fmt, &hsize, &vsize); + u32 val; /* Set buffer size configuration */ - val = ROT_SET_BUF_SIZE_H(vsize) | ROT_SET_BUF_SIZE_W(hsize); + val = ROT_SET_BUF_SIZE_H(buf->buf.height) | + ROT_SET_BUF_SIZE_W(buf->buf.pitch[0] / buf->format->cpp[0]); rot_write(val, ROT_DST_BUF_SIZE); /* Set crop image position configuration */ - val = ROT_CROP_POS_Y(pos->y) | ROT_CROP_POS_X(pos->x); + val = ROT_CROP_POS_Y(buf->rect.y) | ROT_CROP_POS_X(buf->rect.x); rot_write(val, ROT_DST_CROP_POS); - return 0; + /* Set buffer DMA address */ + rot_write(buf->dma_addr[0], ROT_DST_BUF_ADDR(0)); + rot_write(buf->dma_addr[1], ROT_DST_BUF_ADDR(1)); } -static int rotator_dst_set_addr(struct device *dev, - struct drm_exynos_ipp_buf_info *buf_info, - u32 buf_id, enum drm_exynos_ipp_buf_type buf_type) +static void rotator_start(struct rot_context *rot) { - struct rot_context *rot = dev_get_drvdata(dev); - dma_addr_t addr[EXYNOS_DRM_PLANAR_MAX]; - u32 val, fmt, hsize, vsize; - int i; - - /* Set current buf_id */ - rot->cur_buf_id[EXYNOS_DRM_OPS_DST] = buf_id; - - switch (buf_type) { - 
case IPP_BUF_ENQUEUE: - /* Set address configuration */ - for_each_ipp_planar(i) - addr[i] = buf_info->base[i]; - - /* Get format */ - fmt = rotator_reg_get_fmt(rot); - if (!rotator_check_reg_fmt(fmt)) { - DRM_ERROR("invalid format.\n"); - return -EINVAL; - } - - /* Re-set cb planar for NV12 format */ - if ((fmt == ROT_CONTROL_FMT_YCBCR420_2P) && - !addr[EXYNOS_DRM_PLANAR_CB]) { - /* Get buf size */ - val = rot_read(ROT_DST_BUF_SIZE); - - hsize = ROT_GET_BUF_SIZE_W(val); - vsize = ROT_GET_BUF_SIZE_H(val); - - /* Set cb planar */ - addr[EXYNOS_DRM_PLANAR_CB] = - addr[EXYNOS_DRM_PLANAR_Y] + hsize * vsize; - } - - for_each_ipp_planar(i) - rot_write(addr[i], ROT_DST_BUF_ADDR(i)); - break; - case IPP_BUF_DEQUEUE: - for_each_ipp_planar(i) - rot_write(0x0, ROT_DST_BUF_ADDR(i)); - break; - default: - /* Nothing to do */ - break; - } - - return 0; -} - -static struct exynos_drm_ipp_ops rot_src_ops = { - .set_fmt = rotator_src_set_fmt, - .set_size = rotator_src_set_size, - .set_addr = rotator_src_set_addr, -}; - -static struct exynos_drm_ipp_ops rot_dst_ops = { - .set_transf = rotator_dst_set_transf, - .set_size = rotator_dst_set_size, - .set_addr = rotator_dst_set_addr, -}; - -static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv) -{ - struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; - - prop_list->version = 1; - prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | - (1 << EXYNOS_DRM_FLIP_HORIZONTAL); - prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | - (1 << EXYNOS_DRM_DEGREE_90) | - (1 << EXYNOS_DRM_DEGREE_180) | - (1 << EXYNOS_DRM_DEGREE_270); - prop_list->csc = 0; - prop_list->crop = 0; - prop_list->scale = 0; - - return 0; -} - -static inline bool rotator_check_drm_fmt(u32 fmt) -{ - switch (fmt) { - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_NV12: - return true; - default: - DRM_DEBUG_KMS("not support format\n"); - return false; - } -} - -static inline bool rotator_check_drm_flip(enum drm_exynos_flip flip) -{ - switch (flip) { - case EXYNOS_DRM_FLIP_NONE: - case EXYNOS_DRM_FLIP_VERTICAL: - case EXYNOS_DRM_FLIP_HORIZONTAL: - case EXYNOS_DRM_FLIP_BOTH: - return true; - default: - DRM_DEBUG_KMS("invalid flip\n"); - return false; - } -} - -static int rotator_ippdrv_check_property(struct device *dev, - struct drm_exynos_ipp_property *property) -{ - struct drm_exynos_ipp_config *src_config = - &property->config[EXYNOS_DRM_OPS_SRC]; - struct drm_exynos_ipp_config *dst_config = - &property->config[EXYNOS_DRM_OPS_DST]; - struct drm_exynos_pos *src_pos = &src_config->pos; - struct drm_exynos_pos *dst_pos = &dst_config->pos; - struct drm_exynos_sz *src_sz = &src_config->sz; - struct drm_exynos_sz *dst_sz = &dst_config->sz; - bool swap = false; - - /* Check format configuration */ - if (src_config->fmt != dst_config->fmt) { - DRM_DEBUG_KMS("not support csc feature\n"); - return -EINVAL; - } - - if (!rotator_check_drm_fmt(dst_config->fmt)) { - DRM_DEBUG_KMS("invalid format\n"); - return -EINVAL; - } - - /* Check transform configuration */ - if (src_config->degree != EXYNOS_DRM_DEGREE_0) { - DRM_DEBUG_KMS("not support source-side rotation\n"); - return -EINVAL; - } - - switch (dst_config->degree) { - case EXYNOS_DRM_DEGREE_90: - case EXYNOS_DRM_DEGREE_270: - swap = true; - case EXYNOS_DRM_DEGREE_0: - case EXYNOS_DRM_DEGREE_180: - /* No problem */ - break; - default: - DRM_DEBUG_KMS("invalid degree\n"); - return -EINVAL; - } - - if (src_config->flip != EXYNOS_DRM_FLIP_NONE) { - DRM_DEBUG_KMS("not support source-side flip\n"); - return -EINVAL; - } - - if 
(!rotator_check_drm_flip(dst_config->flip)) { - DRM_DEBUG_KMS("invalid flip\n"); - return -EINVAL; - } - - /* Check size configuration */ - if ((src_pos->x + src_pos->w > src_sz->hsize) || - (src_pos->y + src_pos->h > src_sz->vsize)) { - DRM_DEBUG_KMS("out of source buffer bound\n"); - return -EINVAL; - } - - if (swap) { - if ((dst_pos->x + dst_pos->h > dst_sz->vsize) || - (dst_pos->y + dst_pos->w > dst_sz->hsize)) { - DRM_DEBUG_KMS("out of destination buffer bound\n"); - return -EINVAL; - } - - if ((src_pos->w != dst_pos->h) || (src_pos->h != dst_pos->w)) { - DRM_DEBUG_KMS("not support scale feature\n"); - return -EINVAL; - } - } else { - if ((dst_pos->x + dst_pos->w > dst_sz->hsize) || - (dst_pos->y + dst_pos->h > dst_sz->vsize)) { - DRM_DEBUG_KMS("out of destination buffer bound\n"); - return -EINVAL; - } - - if ((src_pos->w != dst_pos->w) || (src_pos->h != dst_pos->h)) { - DRM_DEBUG_KMS("not support scale feature\n"); - return -EINVAL; - } - } - - return 0; -} - -static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) -{ - struct rot_context *rot = dev_get_drvdata(dev); u32 val; - if (rot->suspended) { - DRM_ERROR("suspended state\n"); - return -EPERM; - } - - if (cmd != IPP_CMD_M2M) { - DRM_ERROR("not support cmd: %d\n", cmd); - return -EINVAL; - } - /* Set interrupt enable */ rotator_reg_set_irq(rot, true); val = rot_read(ROT_CONTROL); val |= ROT_CONTROL_START; - rot_write(val, ROT_CONTROL); +} + +static int rotator_commit(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) +{ + struct rot_context *rot = + container_of(ipp, struct rot_context, ipp); + + pm_runtime_get_sync(rot->dev); + rot->task = task; + + rotator_src_set_fmt(rot, task->src.buf.fourcc); + rotator_src_set_buf(rot, &task->src); + rotator_dst_set_transf(rot, task->transform.rotation); + rotator_dst_set_buf(rot, &task->dst); + rotator_start(rot); return 0; } -static struct rot_limit_table rot_limit_tbl_4210 = { - .ycbcr420_2p = { - .min_w = 32, - .min_h = 32, - .max_w = SZ_64K, - .max_h = SZ_64K, - .align = 3, - }, - .rgb888 = { - .min_w = 8, - .min_h = 8, - .max_w = SZ_16K, - .max_h = SZ_16K, - .align = 2, - }, +static const struct exynos_drm_ipp_funcs ipp_funcs = { + .commit = rotator_commit, }; -static struct rot_limit_table rot_limit_tbl_4x12 = { - .ycbcr420_2p = { - .min_w = 32, - .min_h = 32, - .max_w = SZ_32K, - .max_h = SZ_32K, - .align = 3, - }, - .rgb888 = { - .min_w = 8, - .min_h = 8, - .max_w = SZ_8K, - .max_h = SZ_8K, - .align = 2, - }, -}; +static int rotator_bind(struct device *dev, struct device *master, void *data) +{ + struct rot_context *rot = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_ipp *ipp = &rot->ipp; -static struct rot_limit_table rot_limit_tbl_5250 = { - .ycbcr420_2p = { - .min_w = 32, - .min_h = 32, - .max_w = SZ_32K, - .max_h = SZ_32K, - .align = 3, - }, - .rgb888 = { - .min_w = 8, - .min_h = 8, - .max_w = SZ_8K, - .max_h = SZ_8K, - .align = 1, - }, -}; + rot->drm_dev = drm_dev; + drm_iommu_attach_device(drm_dev, dev); -static const struct of_device_id exynos_rotator_match[] = { - { - .compatible = "samsung,exynos4210-rotator", - .data = &rot_limit_tbl_4210, - }, - { - .compatible = "samsung,exynos4212-rotator", - .data = &rot_limit_tbl_4x12, - }, - { - .compatible = "samsung,exynos5250-rotator", - .data = &rot_limit_tbl_5250, - }, - {}, + exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs, + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE, + rot->formats, rot->num_formats, "rotator"); + + dev_info(dev, "The exynos 
rotator has been probed successfully\n"); + + return 0; +} + +static void rotator_unbind(struct device *dev, struct device *master, + void *data) +{ + struct rot_context *rot = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_ipp *ipp = &rot->ipp; + + exynos_drm_ipp_unregister(drm_dev, ipp); + drm_iommu_detach_device(rot->drm_dev, rot->dev); +} + +static const struct component_ops rotator_component_ops = { + .bind = rotator_bind, + .unbind = rotator_unbind, }; -MODULE_DEVICE_TABLE(of, exynos_rotator_match); static int rotator_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct resource *regs_res; struct rot_context *rot; - struct exynos_drm_ippdrv *ippdrv; + const struct rot_variant *variant; + int irq; int ret; - if (!dev->of_node) { - dev_err(dev, "cannot find of_node.\n"); - return -ENODEV; - } - rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL); if (!rot) return -ENOMEM; - rot->limit_tbl = (struct rot_limit_table *) - of_device_get_match_data(dev); - rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - rot->regs = devm_ioremap_resource(dev, rot->regs_res); + variant = of_device_get_match_data(dev); + rot->formats = variant->formats; + rot->num_formats = variant->num_formats; + rot->dev = dev; + regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rot->regs = devm_ioremap_resource(dev, regs_res); if (IS_ERR(rot->regs)) return PTR_ERR(rot->regs); - rot->irq = platform_get_irq(pdev, 0); - if (rot->irq < 0) { + irq = platform_get_irq(pdev, 0); + if (irq < 0) { dev_err(dev, "failed to get irq\n"); - return rot->irq; + return irq; } - ret = devm_request_threaded_irq(dev, rot->irq, NULL, - rotator_irq_handler, IRQF_ONESHOT, "drm_rotator", rot); + ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev), + rot); if (ret < 0) { dev_err(dev, "failed to request irq\n"); return ret; @@ -734,35 +312,19 @@ static int rotator_probe(struct platform_device *pdev) return PTR_ERR(rot->clock); } + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, ROTATOR_AUTOSUSPEND_DELAY); pm_runtime_enable(dev); - - ippdrv = &rot->ippdrv; - ippdrv->dev = dev; - ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &rot_src_ops; - ippdrv->ops[EXYNOS_DRM_OPS_DST] = &rot_dst_ops; - ippdrv->check_property = rotator_ippdrv_check_property; - ippdrv->start = rotator_ippdrv_start; - ret = rotator_init_prop_list(ippdrv); - if (ret < 0) { - dev_err(dev, "failed to init property list.\n"); - goto err_ippdrv_register; - } - - DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv); - platform_set_drvdata(pdev, rot); - ret = exynos_drm_ippdrv_register(ippdrv); - if (ret < 0) { - dev_err(dev, "failed to register drm rotator device\n"); - goto err_ippdrv_register; - } - - dev_info(dev, "The exynos rotator is probed successfully\n"); + ret = component_add(dev, &rotator_component_ops); + if (ret) + goto err_component; return 0; -err_ippdrv_register: +err_component: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); return ret; } @@ -770,45 +332,101 @@ static int rotator_probe(struct platform_device *pdev) static int rotator_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct rot_context *rot = dev_get_drvdata(dev); - struct exynos_drm_ippdrv *ippdrv = &rot->ippdrv; - - exynos_drm_ippdrv_unregister(ippdrv); + component_del(dev, &rotator_component_ops); + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); return 0; } #ifdef CONFIG_PM -static int rotator_clk_crtl(struct rot_context *rot, bool enable) -{ - if 
(enable) { - clk_prepare_enable(rot->clock); - rot->suspended = false; - } else { - clk_disable_unprepare(rot->clock); - rot->suspended = true; - } - - return 0; -} - static int rotator_runtime_suspend(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); - return rotator_clk_crtl(rot, false); + clk_disable_unprepare(rot->clock); + return 0; } static int rotator_runtime_resume(struct device *dev) { struct rot_context *rot = dev_get_drvdata(dev); - return rotator_clk_crtl(rot, true); + return clk_prepare_enable(rot->clock); } #endif +static const struct drm_exynos_ipp_limit rotator_4210_rbg888_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) }, + { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) }, +}; + +static const struct drm_exynos_ipp_limit rotator_4412_rbg888_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) }, + { IPP_SIZE_LIMIT(AREA, .h.align = 4, .v.align = 4) }, +}; + +static const struct drm_exynos_ipp_limit rotator_5250_rbg888_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_8K }, .v = { 8, SZ_8K }) }, + { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) }, +}; + +static const struct drm_exynos_ipp_limit rotator_4210_yuv_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_64K }, .v = { 32, SZ_64K }) }, + { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) }, +}; + +static const struct drm_exynos_ipp_limit rotator_4412_yuv_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, SZ_32K }, .v = { 32, SZ_32K }) }, + { IPP_SIZE_LIMIT(AREA, .h.align = 8, .v.align = 8) }, +}; + +static const struct exynos_drm_ipp_formats rotator_4210_formats[] = { + { IPP_SRCDST_FORMAT(XRGB8888, rotator_4210_rbg888_limits) }, + { IPP_SRCDST_FORMAT(NV12, rotator_4210_yuv_limits) }, +}; + +static const struct exynos_drm_ipp_formats rotator_4412_formats[] = { + { IPP_SRCDST_FORMAT(XRGB8888, rotator_4412_rbg888_limits) }, + { IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) }, +}; + +static const struct exynos_drm_ipp_formats rotator_5250_formats[] = { + { IPP_SRCDST_FORMAT(XRGB8888, rotator_5250_rbg888_limits) }, + { IPP_SRCDST_FORMAT(NV12, rotator_4412_yuv_limits) }, +}; + +static const struct rot_variant rotator_4210_data = { + .formats = rotator_4210_formats, + .num_formats = ARRAY_SIZE(rotator_4210_formats), +}; + +static const struct rot_variant rotator_4412_data = { + .formats = rotator_4412_formats, + .num_formats = ARRAY_SIZE(rotator_4412_formats), +}; + +static const struct rot_variant rotator_5250_data = { + .formats = rotator_5250_formats, + .num_formats = ARRAY_SIZE(rotator_5250_formats), +}; + +static const struct of_device_id exynos_rotator_match[] = { + { + .compatible = "samsung,exynos4210-rotator", + .data = &rotator_4210_data, + }, { + .compatible = "samsung,exynos4212-rotator", + .data = &rotator_4412_data, + }, { + .compatible = "samsung,exynos5250-rotator", + .data = &rotator_5250_data, + }, { + }, +}; +MODULE_DEVICE_TABLE(of, exynos_rotator_match); + static const struct dev_pm_ops rotator_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) @@ -820,7 +438,7 @@ struct platform_driver rotator_driver = { .probe = rotator_probe, .remove = rotator_remove, .driver = { - .name = "exynos-rot", + .name = "exynos-rotator", .owner = THIS_MODULE, .pm = &rotator_pm_ops, .of_match_table = exynos_rotator_match, From 8b7d3ec83aba6381bfc123c7aebcd78199635c3a Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Wed, 9 May 2018 10:59:24 +0200 Subject: [PATCH 14/17] drm/exynos: gsc: Convert 
driver to IPP v2 core API This patch adapts Exynos DRM GScaler driver to new IPP v2 core API. The side effect of this conversion is a switch to driver component API to register properly in the Exynos DRM core. During the conversion driver has been adapted to support more specific compatible strings to distinguish between Exynos5250 and Exynos5420 (different hardware limits). Support for Exynos5433 variant has been added too (different limits table, removed dependency on ARCH_EXYNOS5). Signed-off-by: Marek Szyprowski Tested-by: Hoegeun Kwon Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/Kconfig | 3 +- drivers/gpu/drm/exynos/exynos_drm_drv.c | 1 + drivers/gpu/drm/exynos/exynos_drm_gsc.c | 1083 +++++++---------------- drivers/gpu/drm/exynos/exynos_drm_gsc.h | 24 - 4 files changed, 342 insertions(+), 769 deletions(-) delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_gsc.h diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 63a27c14b133..5c216548ea18 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -112,7 +112,8 @@ config DRM_EXYNOS_ROTATOR config DRM_EXYNOS_GSC bool "GScaler" - depends on BROKEN && ARCH_EXYNOS5 && VIDEO_SAMSUNG_EXYNOS_GSC=n + depends on VIDEO_SAMSUNG_EXYNOS_GSC=n + select DRM_EXYNOS_IPP help Choose this option if you want to use Exynos GSC for DRM. diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 537a588ef370..2dcb94034716 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -266,6 +266,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = { DRM_COMPONENT_DRIVER }, { DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC), + DRM_COMPONENT_DRIVER }, { &exynos_drm_platform_driver, DRM_VIRTUAL_DEVICE diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index 0506b2b17ac1..e99dd1e4ba65 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c @@ -12,18 +12,20 @@ * */ #include +#include #include #include #include #include +#include #include #include #include #include "regs-gsc.h" #include "exynos_drm_drv.h" +#include "exynos_drm_iommu.h" #include "exynos_drm_ipp.h" -#include "exynos_drm_gsc.h" /* * GSC stands for General SCaler and @@ -31,26 +33,10 @@ * input DMA reads image data from the memory. * output DMA writes image data to memory. * GSC supports image rotation and image effect functions. - * - * M2M operation : supports crop/scale/rotation/csc so on. - * Memory ----> GSC H/W ----> Memory. - * Writeback operation : supports cloned screen with FIMD. - * FIMD ----> GSC H/W ----> Memory. - * Output operation : supports direct display using local path. - * Memory ----> GSC H/W ----> FIMD, Mixer. */ -/* - * TODO - * 1. check suspend/resume api if needed. - * 2. need to check use case platform_device_id. - * 3. check src/dst size with, height. - * 4. added check_prepare api for right register. - * 5. need to add supported list in prop_list. - * 6. check prescaler/scaler optimization. 
- */ -#define GSC_MAX_DEVS 4 +#define GSC_MAX_CLOCKS 8 #define GSC_MAX_SRC 4 #define GSC_MAX_DST 16 #define GSC_RESET_TIMEOUT 50 @@ -65,8 +51,6 @@ #define GSC_SC_DOWN_RATIO_4_8 131072 #define GSC_SC_DOWN_RATIO_3_8 174762 #define GSC_SC_DOWN_RATIO_2_8 262144 -#define GSC_REFRESH_MIN 12 -#define GSC_REFRESH_MAX 60 #define GSC_CROP_MAX 8192 #define GSC_CROP_MIN 32 #define GSC_SCALE_MAX 4224 @@ -77,10 +61,9 @@ #define GSC_COEF_H_8T 8 #define GSC_COEF_V_4T 4 #define GSC_COEF_DEPTH 3 +#define GSC_AUTOSUSPEND_DELAY 2000 #define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev)) -#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\ - struct gsc_context, ippdrv); #define gsc_read(offset) readl(ctx->regs + (offset)) #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset)) @@ -103,51 +86,48 @@ struct gsc_scaler { unsigned long main_vratio; }; -/* - * A structure of scaler capability. - * - * find user manual 49.2 features. - * @tile_w: tile mode or rotation width. - * @tile_h: tile mode or rotation height. - * @w: other cases width. - * @h: other cases height. - */ -struct gsc_capability { - /* tile or rotation */ - u32 tile_w; - u32 tile_h; - /* other cases */ - u32 w; - u32 h; -}; - /* * A structure of gsc context. * - * @ippdrv: prepare initialization using ippdrv. * @regs_res: register resources. * @regs: memory mapped io registers. - * @sysreg: handle to SYSREG block regmap. - * @lock: locking of operations. * @gsc_clk: gsc gate clock. * @sc: scaler infomations. * @id: gsc id. * @irq: irq number. * @rotation: supports rotation of src. - * @suspended: qos operations. */ struct gsc_context { - struct exynos_drm_ippdrv ippdrv; + struct exynos_drm_ipp ipp; + struct drm_device *drm_dev; + struct device *dev; + struct exynos_drm_ipp_task *task; + struct exynos_drm_ipp_formats *formats; + unsigned int num_formats; + struct resource *regs_res; void __iomem *regs; - struct regmap *sysreg; - struct mutex lock; - struct clk *gsc_clk; + const char **clk_names; + struct clk *clocks[GSC_MAX_CLOCKS]; + int num_clocks; struct gsc_scaler sc; int id; int irq; bool rotation; - bool suspended; +}; + +/** + * struct gsc_driverdata - per device type driver data for init time. 
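+ *
+ * Each supported GSC variant (Exynos5250, Exynos5420, Exynos5433) provides
+ * its own instance of this structure with the limits and clocks required by
+ * that SoC.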
+ * + * @limits: picture size limits array + * @clk_names: names of clocks needed by this variant + * @num_clocks: the number of clocks needed by this variant + */ +struct gsc_driverdata { + const struct drm_exynos_ipp_limit *limits; + int num_limits; + const char *clk_names[GSC_MAX_CLOCKS]; + int num_clocks; }; /* 8-tap Filter Coefficient */ @@ -438,25 +418,6 @@ static int gsc_sw_reset(struct gsc_context *ctx) return 0; } -static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable) -{ - unsigned int gscblk_cfg; - - if (!ctx->sysreg) - return; - - regmap_read(ctx->sysreg, SYSREG_GSCBLK_CFG1, &gscblk_cfg); - - if (enable) - gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) | - GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) | - GSC_BLK_SW_RESET_WB_DEST(ctx->id); - else - gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id); - - regmap_write(ctx->sysreg, SYSREG_GSCBLK_CFG1, gscblk_cfg); -} - static void gsc_handle_irq(struct gsc_context *ctx, bool enable, bool overflow, bool done) { @@ -487,10 +448,8 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable, } -static int gsc_src_set_fmt(struct device *dev, u32 fmt) +static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt) { - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); @@ -506,6 +465,7 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt) cfg |= GSC_IN_RGB565; break; case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: cfg |= GSC_IN_XRGB8888; break; case DRM_FORMAT_BGRX8888: @@ -548,115 +508,84 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt) cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P); break; - default: - dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt); - return -EINVAL; } gsc_write(cfg, GSC_IN_CON); - - return 0; } -static int gsc_src_set_transf(struct device *dev, - enum drm_exynos_degree degree, - enum drm_exynos_flip flip, bool *swap) +static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation) { - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; + unsigned int degree = rotation & DRM_MODE_ROTATE_MASK; u32 cfg; - DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); - cfg = gsc_read(GSC_IN_CON); cfg &= ~GSC_IN_ROT_MASK; switch (degree) { - case EXYNOS_DRM_DEGREE_0: - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + case DRM_MODE_ROTATE_0: + if (rotation & DRM_MODE_REFLECT_Y) cfg |= GSC_IN_ROT_XFLIP; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_X) cfg |= GSC_IN_ROT_YFLIP; break; - case EXYNOS_DRM_DEGREE_90: - if (flip & EXYNOS_DRM_FLIP_VERTICAL) - cfg |= GSC_IN_ROT_90_XFLIP; - else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) - cfg |= GSC_IN_ROT_90_YFLIP; - else - cfg |= GSC_IN_ROT_90; + case DRM_MODE_ROTATE_90: + cfg |= GSC_IN_ROT_90; + if (rotation & DRM_MODE_REFLECT_Y) + cfg |= GSC_IN_ROT_XFLIP; + if (rotation & DRM_MODE_REFLECT_X) + cfg |= GSC_IN_ROT_YFLIP; break; - case EXYNOS_DRM_DEGREE_180: + case DRM_MODE_ROTATE_180: cfg |= GSC_IN_ROT_180; - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~GSC_IN_ROT_XFLIP; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_X) cfg &= ~GSC_IN_ROT_YFLIP; break; - case EXYNOS_DRM_DEGREE_270: + case DRM_MODE_ROTATE_270: cfg |= GSC_IN_ROT_270; - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~GSC_IN_ROT_XFLIP; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_X) 
cfg &= ~GSC_IN_ROT_YFLIP; break; - default: - dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); - return -EINVAL; } gsc_write(cfg, GSC_IN_CON); ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0; - *swap = ctx->rotation; - - return 0; } -static int gsc_src_set_size(struct device *dev, int swap, - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +static void gsc_src_set_size(struct gsc_context *ctx, + struct exynos_drm_ipp_buffer *buf) { - struct gsc_context *ctx = get_gsc_context(dev); - struct drm_exynos_pos img_pos = *pos; struct gsc_scaler *sc = &ctx->sc; u32 cfg; - DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n", - swap, pos->x, pos->y, pos->w, pos->h); - - if (swap) { - img_pos.w = pos->h; - img_pos.h = pos->w; - } - /* pixel offset */ - cfg = (GSC_SRCIMG_OFFSET_X(img_pos.x) | - GSC_SRCIMG_OFFSET_Y(img_pos.y)); + cfg = (GSC_SRCIMG_OFFSET_X(buf->rect.x) | + GSC_SRCIMG_OFFSET_Y(buf->rect.y)); gsc_write(cfg, GSC_SRCIMG_OFFSET); /* cropped size */ - cfg = (GSC_CROPPED_WIDTH(img_pos.w) | - GSC_CROPPED_HEIGHT(img_pos.h)); + cfg = (GSC_CROPPED_WIDTH(buf->rect.w) | + GSC_CROPPED_HEIGHT(buf->rect.h)); gsc_write(cfg, GSC_CROPPED_SIZE); - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize); - /* original size */ cfg = gsc_read(GSC_SRCIMG_SIZE); cfg &= ~(GSC_SRCIMG_HEIGHT_MASK | GSC_SRCIMG_WIDTH_MASK); - cfg |= (GSC_SRCIMG_WIDTH(sz->hsize) | - GSC_SRCIMG_HEIGHT(sz->vsize)); + cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) | + GSC_SRCIMG_HEIGHT(buf->buf.height)); gsc_write(cfg, GSC_SRCIMG_SIZE); cfg = gsc_read(GSC_IN_CON); cfg &= ~GSC_IN_RGB_TYPE_MASK; - DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range); - - if (pos->w >= GSC_WIDTH_ITU_709) + if (buf->rect.w >= GSC_WIDTH_ITU_709) if (sc->range) cfg |= GSC_IN_RGB_HD_WIDE; else @@ -668,103 +597,39 @@ static int gsc_src_set_size(struct device *dev, int swap, cfg |= GSC_IN_RGB_SD_NARROW; gsc_write(cfg, GSC_IN_CON); - - return 0; } -static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id, - enum drm_exynos_ipp_buf_type buf_type) +static void gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id, + bool enqueue) { - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - bool masked; + bool masked = !enqueue; u32 cfg; u32 mask = 0x00000001 << buf_id; - DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); - /* mask register set */ cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK); - switch (buf_type) { - case IPP_BUF_ENQUEUE: - masked = false; - break; - case IPP_BUF_DEQUEUE: - masked = true; - break; - default: - dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n"); - return -EINVAL; - } - /* sequence id */ cfg &= ~mask; cfg |= masked << buf_id; gsc_write(cfg, GSC_IN_BASE_ADDR_Y_MASK); gsc_write(cfg, GSC_IN_BASE_ADDR_CB_MASK); gsc_write(cfg, GSC_IN_BASE_ADDR_CR_MASK); - - return 0; } -static int gsc_src_set_addr(struct device *dev, - struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, - enum drm_exynos_ipp_buf_type buf_type) +static void gsc_src_set_addr(struct gsc_context *ctx, u32 buf_id, + struct exynos_drm_ipp_buffer *buf) { - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_property *property; - - if (!c_node) { - DRM_ERROR("failed to get c_node.\n"); - return -EFAULT; - } - - property = &c_node->property; - - DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", - property->prop_id, buf_id, buf_type); - - if (buf_id > GSC_MAX_SRC) { - dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id); - 
return -EINVAL; - } - /* address register set */ - switch (buf_type) { - case IPP_BUF_ENQUEUE: - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], - GSC_IN_BASE_ADDR_Y(buf_id)); - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], - GSC_IN_BASE_ADDR_CB(buf_id)); - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], - GSC_IN_BASE_ADDR_CR(buf_id)); - break; - case IPP_BUF_DEQUEUE: - gsc_write(0x0, GSC_IN_BASE_ADDR_Y(buf_id)); - gsc_write(0x0, GSC_IN_BASE_ADDR_CB(buf_id)); - gsc_write(0x0, GSC_IN_BASE_ADDR_CR(buf_id)); - break; - default: - /* bypass */ - break; - } + gsc_write(buf->dma_addr[0], GSC_IN_BASE_ADDR_Y(buf_id)); + gsc_write(buf->dma_addr[1], GSC_IN_BASE_ADDR_CB(buf_id)); + gsc_write(buf->dma_addr[2], GSC_IN_BASE_ADDR_CR(buf_id)); - return gsc_src_set_buf_seq(ctx, buf_id, buf_type); + gsc_src_set_buf_seq(ctx, buf_id, true); } -static struct exynos_drm_ipp_ops gsc_src_ops = { - .set_fmt = gsc_src_set_fmt, - .set_transf = gsc_src_set_transf, - .set_size = gsc_src_set_size, - .set_addr = gsc_src_set_addr, -}; - -static int gsc_dst_set_fmt(struct device *dev, u32 fmt) +static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt) { - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); @@ -779,8 +644,9 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt) case DRM_FORMAT_RGB565: cfg |= GSC_OUT_RGB565; break; + case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB8888: - cfg |= GSC_OUT_XRGB8888; + cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_GLOBAL_ALPHA(0xff)); break; case DRM_FORMAT_BGRX8888: cfg |= (GSC_OUT_XRGB8888 | GSC_OUT_RB_SWAP); @@ -819,69 +685,9 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt) cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P); break; - default: - dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt); - return -EINVAL; } gsc_write(cfg, GSC_OUT_CON); - - return 0; -} - -static int gsc_dst_set_transf(struct device *dev, - enum drm_exynos_degree degree, - enum drm_exynos_flip flip, bool *swap) -{ - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - u32 cfg; - - DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); - - cfg = gsc_read(GSC_IN_CON); - cfg &= ~GSC_IN_ROT_MASK; - - switch (degree) { - case EXYNOS_DRM_DEGREE_0: - if (flip & EXYNOS_DRM_FLIP_VERTICAL) - cfg |= GSC_IN_ROT_XFLIP; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) - cfg |= GSC_IN_ROT_YFLIP; - break; - case EXYNOS_DRM_DEGREE_90: - if (flip & EXYNOS_DRM_FLIP_VERTICAL) - cfg |= GSC_IN_ROT_90_XFLIP; - else if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) - cfg |= GSC_IN_ROT_90_YFLIP; - else - cfg |= GSC_IN_ROT_90; - break; - case EXYNOS_DRM_DEGREE_180: - cfg |= GSC_IN_ROT_180; - if (flip & EXYNOS_DRM_FLIP_VERTICAL) - cfg &= ~GSC_IN_ROT_XFLIP; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) - cfg &= ~GSC_IN_ROT_YFLIP; - break; - case EXYNOS_DRM_DEGREE_270: - cfg |= GSC_IN_ROT_270; - if (flip & EXYNOS_DRM_FLIP_VERTICAL) - cfg &= ~GSC_IN_ROT_XFLIP; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) - cfg &= ~GSC_IN_ROT_YFLIP; - break; - default: - dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); - return -EINVAL; - } - - gsc_write(cfg, GSC_IN_CON); - - ctx->rotation = (cfg & GSC_IN_ROT_90) ? 
1 : 0; - *swap = ctx->rotation; - - return 0; } static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio) @@ -919,9 +725,9 @@ static void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *shfactor) } static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc, - struct drm_exynos_pos *src, struct drm_exynos_pos *dst) + struct drm_exynos_ipp_task_rect *src, + struct drm_exynos_ipp_task_rect *dst) { - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; u32 src_w, src_h, dst_w, dst_h; int ret = 0; @@ -939,13 +745,13 @@ static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc, ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio); if (ret) { - dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); + dev_err(ctx->dev, "failed to get ratio horizontal.\n"); return ret; } ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio); if (ret) { - dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); + dev_err(ctx->dev, "failed to get ratio vertical.\n"); return ret; } @@ -1039,47 +845,37 @@ static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc) gsc_write(cfg, GSC_MAIN_V_RATIO); } -static int gsc_dst_set_size(struct device *dev, int swap, - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +static void gsc_dst_set_size(struct gsc_context *ctx, + struct exynos_drm_ipp_buffer *buf) { - struct gsc_context *ctx = get_gsc_context(dev); - struct drm_exynos_pos img_pos = *pos; struct gsc_scaler *sc = &ctx->sc; u32 cfg; - DRM_DEBUG_KMS("swap[%d]x[%d]y[%d]w[%d]h[%d]\n", - swap, pos->x, pos->y, pos->w, pos->h); - - if (swap) { - img_pos.w = pos->h; - img_pos.h = pos->w; - } - /* pixel offset */ - cfg = (GSC_DSTIMG_OFFSET_X(pos->x) | - GSC_DSTIMG_OFFSET_Y(pos->y)); + cfg = (GSC_DSTIMG_OFFSET_X(buf->rect.x) | + GSC_DSTIMG_OFFSET_Y(buf->rect.y)); gsc_write(cfg, GSC_DSTIMG_OFFSET); /* scaled size */ - cfg = (GSC_SCALED_WIDTH(img_pos.w) | GSC_SCALED_HEIGHT(img_pos.h)); + if (ctx->rotation) + cfg = (GSC_SCALED_WIDTH(buf->rect.h) | + GSC_SCALED_HEIGHT(buf->rect.w)); + else + cfg = (GSC_SCALED_WIDTH(buf->rect.w) | + GSC_SCALED_HEIGHT(buf->rect.h)); gsc_write(cfg, GSC_SCALED_SIZE); - DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", sz->hsize, sz->vsize); - /* original size */ cfg = gsc_read(GSC_DSTIMG_SIZE); - cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | - GSC_DSTIMG_WIDTH_MASK); - cfg |= (GSC_DSTIMG_WIDTH(sz->hsize) | - GSC_DSTIMG_HEIGHT(sz->vsize)); + cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK); + cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) | + GSC_DSTIMG_HEIGHT(buf->buf.height); gsc_write(cfg, GSC_DSTIMG_SIZE); cfg = gsc_read(GSC_OUT_CON); cfg &= ~GSC_OUT_RGB_TYPE_MASK; - DRM_DEBUG_KMS("width[%d]range[%d]\n", pos->w, sc->range); - - if (pos->w >= GSC_WIDTH_ITU_709) + if (buf->rect.w >= GSC_WIDTH_ITU_709) if (sc->range) cfg |= GSC_OUT_RGB_HD_WIDE; else @@ -1091,8 +887,6 @@ static int gsc_dst_set_size(struct device *dev, int swap, cfg |= GSC_OUT_RGB_SD_NARROW; gsc_write(cfg, GSC_OUT_CON); - - return 0; } static int gsc_dst_get_buf_seq(struct gsc_context *ctx) @@ -1111,35 +905,16 @@ static int gsc_dst_get_buf_seq(struct gsc_context *ctx) return buf_num; } -static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id, - enum drm_exynos_ipp_buf_type buf_type) +static void gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id, + bool enqueue) { - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - bool masked; + bool masked = !enqueue; u32 cfg; u32 mask = 0x00000001 << buf_id; - int ret = 0; - - DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, 
buf_type); - - mutex_lock(&ctx->lock); /* mask register set */ cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK); - switch (buf_type) { - case IPP_BUF_ENQUEUE: - masked = false; - break; - case IPP_BUF_DEQUEUE: - masked = true; - break; - default: - dev_err(ippdrv->dev, "invalid buf ctrl parameter.\n"); - ret = -EINVAL; - goto err_unlock; - } - /* sequence id */ cfg &= ~mask; cfg |= masked << buf_id; @@ -1148,94 +923,29 @@ static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id, gsc_write(cfg, GSC_OUT_BASE_ADDR_CR_MASK); /* interrupt enable */ - if (buf_type == IPP_BUF_ENQUEUE && - gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START) + if (enqueue && gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START) gsc_handle_irq(ctx, true, false, true); /* interrupt disable */ - if (buf_type == IPP_BUF_DEQUEUE && - gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP) + if (!enqueue && gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP) gsc_handle_irq(ctx, false, false, true); - -err_unlock: - mutex_unlock(&ctx->lock); - return ret; } -static int gsc_dst_set_addr(struct device *dev, - struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, - enum drm_exynos_ipp_buf_type buf_type) +static void gsc_dst_set_addr(struct gsc_context *ctx, + u32 buf_id, struct exynos_drm_ipp_buffer *buf) { - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_property *property; - - if (!c_node) { - DRM_ERROR("failed to get c_node.\n"); - return -EFAULT; - } - - property = &c_node->property; - - DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", - property->prop_id, buf_id, buf_type); - - if (buf_id > GSC_MAX_DST) { - dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id); - return -EINVAL; - } - /* address register set */ - switch (buf_type) { - case IPP_BUF_ENQUEUE: - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_Y], - GSC_OUT_BASE_ADDR_Y(buf_id)); - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CB], - GSC_OUT_BASE_ADDR_CB(buf_id)); - gsc_write(buf_info->base[EXYNOS_DRM_PLANAR_CR], - GSC_OUT_BASE_ADDR_CR(buf_id)); - break; - case IPP_BUF_DEQUEUE: - gsc_write(0x0, GSC_OUT_BASE_ADDR_Y(buf_id)); - gsc_write(0x0, GSC_OUT_BASE_ADDR_CB(buf_id)); - gsc_write(0x0, GSC_OUT_BASE_ADDR_CR(buf_id)); - break; - default: - /* bypass */ - break; - } + gsc_write(buf->dma_addr[0], GSC_OUT_BASE_ADDR_Y(buf_id)); + gsc_write(buf->dma_addr[1], GSC_OUT_BASE_ADDR_CB(buf_id)); + gsc_write(buf->dma_addr[2], GSC_OUT_BASE_ADDR_CR(buf_id)); - return gsc_dst_set_buf_seq(ctx, buf_id, buf_type); -} - -static struct exynos_drm_ipp_ops gsc_dst_ops = { - .set_fmt = gsc_dst_set_fmt, - .set_transf = gsc_dst_set_transf, - .set_size = gsc_dst_set_size, - .set_addr = gsc_dst_set_addr, -}; - -static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("enable[%d]\n", enable); - - if (enable) { - clk_prepare_enable(ctx->gsc_clk); - ctx->suspended = false; - } else { - clk_disable_unprepare(ctx->gsc_clk); - ctx->suspended = true; - } - - return 0; + gsc_dst_set_buf_seq(ctx, buf_id, true); } static int gsc_get_src_buf_index(struct gsc_context *ctx) { u32 cfg, curr_index, i; u32 buf_id = GSC_MAX_SRC; - int ret; DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); @@ -1249,19 +959,15 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx) } } + DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, + curr_index, buf_id); + if (buf_id == GSC_MAX_SRC) { DRM_ERROR("failed to get in buffer index.\n"); return -EINVAL; } - ret = gsc_src_set_buf_seq(ctx, buf_id, 
IPP_BUF_DEQUEUE); - if (ret < 0) { - DRM_ERROR("failed to dequeue.\n"); - return ret; - } - - DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, - curr_index, buf_id); + gsc_src_set_buf_seq(ctx, buf_id, false); return buf_id; } @@ -1270,7 +976,6 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx) { u32 cfg, curr_index, i; u32 buf_id = GSC_MAX_DST; - int ret; DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); @@ -1289,11 +994,7 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx) return -EINVAL; } - ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); - if (ret < 0) { - DRM_ERROR("failed to dequeue.\n"); - return ret; - } + gsc_dst_set_buf_seq(ctx, buf_id, false); DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg, curr_index, buf_id); @@ -1304,215 +1005,55 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx) static irqreturn_t gsc_irq_handler(int irq, void *dev_id) { struct gsc_context *ctx = dev_id; - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_event_work *event_work = - c_node->event_work; u32 status; - int buf_id[EXYNOS_DRM_OPS_MAX]; + int err = 0; DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); status = gsc_read(GSC_IRQ); if (status & GSC_IRQ_STATUS_OR_IRQ) { - dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", + dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n", ctx->id, status); - return IRQ_NONE; + err = -EINVAL; } if (status & GSC_IRQ_STATUS_OR_FRM_DONE) { - dev_dbg(ippdrv->dev, "occurred frame done at %d, status 0x%x.\n", + int src_buf_id, dst_buf_id; + + dev_dbg(ctx->dev, "occurred frame done at %d, status 0x%x.\n", ctx->id, status); - buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx); - if (buf_id[EXYNOS_DRM_OPS_SRC] < 0) - return IRQ_HANDLED; + src_buf_id = gsc_get_src_buf_index(ctx); + dst_buf_id = gsc_get_dst_buf_index(ctx); - buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx); - if (buf_id[EXYNOS_DRM_OPS_DST] < 0) - return IRQ_HANDLED; + DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", src_buf_id, + dst_buf_id); - DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", - buf_id[EXYNOS_DRM_OPS_SRC], buf_id[EXYNOS_DRM_OPS_DST]); + if (src_buf_id < 0 || dst_buf_id < 0) + err = -EINVAL; + } - event_work->ippdrv = ippdrv; - event_work->buf_id[EXYNOS_DRM_OPS_SRC] = - buf_id[EXYNOS_DRM_OPS_SRC]; - event_work->buf_id[EXYNOS_DRM_OPS_DST] = - buf_id[EXYNOS_DRM_OPS_DST]; - queue_work(ippdrv->event_workq, &event_work->work); + if (ctx->task) { + struct exynos_drm_ipp_task *task = ctx->task; + + ctx->task = NULL; + pm_runtime_mark_last_busy(ctx->dev); + pm_runtime_put_autosuspend(ctx->dev); + exynos_drm_ipp_task_done(task, err); } return IRQ_HANDLED; } -static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) +static int gsc_reset(struct gsc_context *ctx) { - struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; - - prop_list->version = 1; - prop_list->writeback = 1; - prop_list->refresh_min = GSC_REFRESH_MIN; - prop_list->refresh_max = GSC_REFRESH_MAX; - prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) | - (1 << EXYNOS_DRM_FLIP_HORIZONTAL); - prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | - (1 << EXYNOS_DRM_DEGREE_90) | - (1 << EXYNOS_DRM_DEGREE_180) | - (1 << EXYNOS_DRM_DEGREE_270); - prop_list->csc = 1; - prop_list->crop = 1; - prop_list->crop_max.hsize = GSC_CROP_MAX; - prop_list->crop_max.vsize = GSC_CROP_MAX; - prop_list->crop_min.hsize = GSC_CROP_MIN; - prop_list->crop_min.vsize = GSC_CROP_MIN; - prop_list->scale = 1; - 
prop_list->scale_max.hsize = GSC_SCALE_MAX; - prop_list->scale_max.vsize = GSC_SCALE_MAX; - prop_list->scale_min.hsize = GSC_SCALE_MIN; - prop_list->scale_min.vsize = GSC_SCALE_MIN; - - return 0; -} - -static inline bool gsc_check_drm_flip(enum drm_exynos_flip flip) -{ - switch (flip) { - case EXYNOS_DRM_FLIP_NONE: - case EXYNOS_DRM_FLIP_VERTICAL: - case EXYNOS_DRM_FLIP_HORIZONTAL: - case EXYNOS_DRM_FLIP_BOTH: - return true; - default: - DRM_DEBUG_KMS("invalid flip\n"); - return false; - } -} - -static int gsc_ippdrv_check_property(struct device *dev, - struct drm_exynos_ipp_property *property) -{ - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list; - struct drm_exynos_ipp_config *config; - struct drm_exynos_pos *pos; - struct drm_exynos_sz *sz; - bool swap; - int i; - - for_each_ipp_ops(i) { - if ((i == EXYNOS_DRM_OPS_SRC) && - (property->cmd == IPP_CMD_WB)) - continue; - - config = &property->config[i]; - pos = &config->pos; - sz = &config->sz; - - /* check for flip */ - if (!gsc_check_drm_flip(config->flip)) { - DRM_ERROR("invalid flip.\n"); - goto err_property; - } - - /* check for degree */ - switch (config->degree) { - case EXYNOS_DRM_DEGREE_90: - case EXYNOS_DRM_DEGREE_270: - swap = true; - break; - case EXYNOS_DRM_DEGREE_0: - case EXYNOS_DRM_DEGREE_180: - swap = false; - break; - default: - DRM_ERROR("invalid degree.\n"); - goto err_property; - } - - /* check for buffer bound */ - if ((pos->x + pos->w > sz->hsize) || - (pos->y + pos->h > sz->vsize)) { - DRM_ERROR("out of buf bound.\n"); - goto err_property; - } - - /* check for crop */ - if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) { - if (swap) { - if ((pos->h < pp->crop_min.hsize) || - (sz->vsize > pp->crop_max.hsize) || - (pos->w < pp->crop_min.vsize) || - (sz->hsize > pp->crop_max.vsize)) { - DRM_ERROR("out of crop size.\n"); - goto err_property; - } - } else { - if ((pos->w < pp->crop_min.hsize) || - (sz->hsize > pp->crop_max.hsize) || - (pos->h < pp->crop_min.vsize) || - (sz->vsize > pp->crop_max.vsize)) { - DRM_ERROR("out of crop size.\n"); - goto err_property; - } - } - } - - /* check for scale */ - if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) { - if (swap) { - if ((pos->h < pp->scale_min.hsize) || - (sz->vsize > pp->scale_max.hsize) || - (pos->w < pp->scale_min.vsize) || - (sz->hsize > pp->scale_max.vsize)) { - DRM_ERROR("out of scale size.\n"); - goto err_property; - } - } else { - if ((pos->w < pp->scale_min.hsize) || - (sz->hsize > pp->scale_max.hsize) || - (pos->h < pp->scale_min.vsize) || - (sz->vsize > pp->scale_max.vsize)) { - DRM_ERROR("out of scale size.\n"); - goto err_property; - } - } - } - } - - return 0; - -err_property: - for_each_ipp_ops(i) { - if ((i == EXYNOS_DRM_OPS_SRC) && - (property->cmd == IPP_CMD_WB)) - continue; - - config = &property->config[i]; - pos = &config->pos; - sz = &config->sz; - - DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n", - i ? 
"dst" : "src", config->flip, config->degree, - pos->x, pos->y, pos->w, pos->h, - sz->hsize, sz->vsize); - } - - return -EINVAL; -} - - -static int gsc_ippdrv_reset(struct device *dev) -{ - struct gsc_context *ctx = get_gsc_context(dev); struct gsc_scaler *sc = &ctx->sc; int ret; /* reset h/w block */ ret = gsc_sw_reset(ctx); if (ret < 0) { - dev_err(dev, "failed to reset hardware.\n"); + dev_err(ctx->dev, "failed to reset hardware.\n"); return ret; } @@ -1523,166 +1064,172 @@ static int gsc_ippdrv_reset(struct device *dev) return 0; } -static int gsc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) +static void gsc_start(struct gsc_context *ctx) { - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_property *property; - struct drm_exynos_ipp_config *config; - struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX]; - struct drm_exynos_ipp_set_wb set_wb; u32 cfg; - int ret, i; - - DRM_DEBUG_KMS("cmd[%d]\n", cmd); - - if (!c_node) { - DRM_ERROR("failed to get c_node.\n"); - return -EINVAL; - } - - property = &c_node->property; gsc_handle_irq(ctx, true, false, true); - for_each_ipp_ops(i) { - config = &property->config[i]; - img_pos[i] = config->pos; - } + /* enable one shot */ + cfg = gsc_read(GSC_ENABLE); + cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK | + GSC_ENABLE_CLK_GATE_MODE_MASK); + cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT; + gsc_write(cfg, GSC_ENABLE); - switch (cmd) { - case IPP_CMD_M2M: - /* enable one shot */ - cfg = gsc_read(GSC_ENABLE); - cfg &= ~(GSC_ENABLE_ON_CLEAR_MASK | - GSC_ENABLE_CLK_GATE_MODE_MASK); - cfg |= GSC_ENABLE_ON_CLEAR_ONESHOT; - gsc_write(cfg, GSC_ENABLE); + /* src dma memory */ + cfg = gsc_read(GSC_IN_CON); + cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); + cfg |= GSC_IN_PATH_MEMORY; + gsc_write(cfg, GSC_IN_CON); - /* src dma memory */ - cfg = gsc_read(GSC_IN_CON); - cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); - cfg |= GSC_IN_PATH_MEMORY; - gsc_write(cfg, GSC_IN_CON); - - /* dst dma memory */ - cfg = gsc_read(GSC_OUT_CON); - cfg |= GSC_OUT_PATH_MEMORY; - gsc_write(cfg, GSC_OUT_CON); - break; - case IPP_CMD_WB: - set_wb.enable = 1; - set_wb.refresh = property->refresh_rate; - gsc_set_gscblk_fimd_wb(ctx, set_wb.enable); - exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); - - /* src local path */ - cfg = gsc_read(GSC_IN_CON); - cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); - cfg |= (GSC_IN_PATH_LOCAL | GSC_IN_LOCAL_FIMD_WB); - gsc_write(cfg, GSC_IN_CON); - - /* dst dma memory */ - cfg = gsc_read(GSC_OUT_CON); - cfg |= GSC_OUT_PATH_MEMORY; - gsc_write(cfg, GSC_OUT_CON); - break; - case IPP_CMD_OUTPUT: - /* src dma memory */ - cfg = gsc_read(GSC_IN_CON); - cfg &= ~(GSC_IN_PATH_MASK | GSC_IN_LOCAL_SEL_MASK); - cfg |= GSC_IN_PATH_MEMORY; - gsc_write(cfg, GSC_IN_CON); - - /* dst local path */ - cfg = gsc_read(GSC_OUT_CON); - cfg |= GSC_OUT_PATH_MEMORY; - gsc_write(cfg, GSC_OUT_CON); - break; - default: - ret = -EINVAL; - dev_err(dev, "invalid operations.\n"); - return ret; - } - - ret = gsc_set_prescaler(ctx, &ctx->sc, - &img_pos[EXYNOS_DRM_OPS_SRC], - &img_pos[EXYNOS_DRM_OPS_DST]); - if (ret) { - dev_err(dev, "failed to set prescaler.\n"); - return ret; - } + /* dst dma memory */ + cfg = gsc_read(GSC_OUT_CON); + cfg |= GSC_OUT_PATH_MEMORY; + gsc_write(cfg, GSC_OUT_CON); gsc_set_scaler(ctx, &ctx->sc); cfg = gsc_read(GSC_ENABLE); cfg |= GSC_ENABLE_ON; gsc_write(cfg, GSC_ENABLE); +} + +static int gsc_commit(struct 
exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) +{ + struct gsc_context *ctx = container_of(ipp, struct gsc_context, ipp); + int ret; + + pm_runtime_get_sync(ctx->dev); + ctx->task = task; + + ret = gsc_reset(ctx); + if (ret) { + pm_runtime_put_autosuspend(ctx->dev); + ctx->task = NULL; + return ret; + } + + gsc_src_set_fmt(ctx, task->src.buf.fourcc); + gsc_src_set_transf(ctx, task->transform.rotation); + gsc_src_set_size(ctx, &task->src); + gsc_src_set_addr(ctx, 0, &task->src); + gsc_dst_set_fmt(ctx, task->dst.buf.fourcc); + gsc_dst_set_size(ctx, &task->dst); + gsc_dst_set_addr(ctx, 0, &task->dst); + gsc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect); + gsc_start(ctx); return 0; } -static void gsc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) +static void gsc_abort(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) { - struct gsc_context *ctx = get_gsc_context(dev); - struct drm_exynos_ipp_set_wb set_wb = {0, 0}; - u32 cfg; + struct gsc_context *ctx = + container_of(ipp, struct gsc_context, ipp); - DRM_DEBUG_KMS("cmd[%d]\n", cmd); + gsc_reset(ctx); + if (ctx->task) { + struct exynos_drm_ipp_task *task = ctx->task; - switch (cmd) { - case IPP_CMD_M2M: - /* bypass */ - break; - case IPP_CMD_WB: - gsc_set_gscblk_fimd_wb(ctx, set_wb.enable); - exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); - break; - case IPP_CMD_OUTPUT: - default: - dev_err(dev, "invalid operations.\n"); - break; + ctx->task = NULL; + pm_runtime_mark_last_busy(ctx->dev); + pm_runtime_put_autosuspend(ctx->dev); + exynos_drm_ipp_task_done(task, -EIO); } - - gsc_handle_irq(ctx, false, false, true); - - /* reset sequence */ - gsc_write(0xff, GSC_OUT_BASE_ADDR_Y_MASK); - gsc_write(0xff, GSC_OUT_BASE_ADDR_CB_MASK); - gsc_write(0xff, GSC_OUT_BASE_ADDR_CR_MASK); - - cfg = gsc_read(GSC_ENABLE); - cfg &= ~GSC_ENABLE_ON; - gsc_write(cfg, GSC_ENABLE); } +static struct exynos_drm_ipp_funcs ipp_funcs = { + .commit = gsc_commit, + .abort = gsc_abort, +}; + +static int gsc_bind(struct device *dev, struct device *master, void *data) +{ + struct gsc_context *ctx = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_ipp *ipp = &ctx->ipp; + + ctx->drm_dev = drm_dev; + drm_iommu_attach_device(drm_dev, dev); + + exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs, + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | + DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT, + ctx->formats, ctx->num_formats, "gsc"); + + dev_info(dev, "The exynos gscaler has been probed successfully\n"); + + return 0; +} + +static void gsc_unbind(struct device *dev, struct device *master, + void *data) +{ + struct gsc_context *ctx = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_ipp *ipp = &ctx->ipp; + + exynos_drm_ipp_unregister(drm_dev, ipp); + drm_iommu_detach_device(drm_dev, dev); +} + +static const struct component_ops gsc_component_ops = { + .bind = gsc_bind, + .unbind = gsc_unbind, +}; + +static const unsigned int gsc_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_BGRX8888, + DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61, + DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, + DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422, +}; + static int gsc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct gsc_driverdata *driver_data; + struct exynos_drm_ipp_formats *formats; struct gsc_context *ctx; struct resource *res; - 
struct exynos_drm_ippdrv *ippdrv; - int ret; + int ret, i; ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; - if (dev->of_node) { - ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, - "samsung,sysreg"); - if (IS_ERR(ctx->sysreg)) { - dev_warn(dev, "failed to get system register.\n"); - ctx->sysreg = NULL; - } + formats = devm_kzalloc(dev, sizeof(*formats) * + (ARRAY_SIZE(gsc_formats)), GFP_KERNEL); + if (!formats) + return -ENOMEM; + + driver_data = (struct gsc_driverdata *)of_device_get_match_data(dev); + ctx->dev = dev; + ctx->num_clocks = driver_data->num_clocks; + ctx->clk_names = driver_data->clk_names; + + for (i = 0; i < ARRAY_SIZE(gsc_formats); i++) { + formats[i].fourcc = gsc_formats[i]; + formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE | + DRM_EXYNOS_IPP_FORMAT_DESTINATION; + formats[i].limits = driver_data->limits; + formats[i].num_limits = driver_data->num_limits; } + ctx->formats = formats; + ctx->num_formats = ARRAY_SIZE(gsc_formats); /* clock control */ - ctx->gsc_clk = devm_clk_get(dev, "gscl"); - if (IS_ERR(ctx->gsc_clk)) { - dev_err(dev, "failed to get gsc clock.\n"); - return PTR_ERR(ctx->gsc_clk); + for (i = 0; i < ctx->num_clocks; i++) { + ctx->clocks[i] = devm_clk_get(dev, ctx->clk_names[i]); + if (IS_ERR(ctx->clocks[i])) { + dev_err(dev, "failed to get clock: %s\n", + ctx->clk_names[i]); + return PTR_ERR(ctx->clocks[i]); + } } /* resource memory */ @@ -1699,8 +1246,8 @@ static int gsc_probe(struct platform_device *pdev) } ctx->irq = res->start; - ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler, - IRQF_ONESHOT, "drm_gsc", ctx); + ret = devm_request_irq(dev, ctx->irq, gsc_irq_handler, 0, + dev_name(dev), ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); return ret; @@ -1709,38 +1256,22 @@ static int gsc_probe(struct platform_device *pdev) /* context initailization */ ctx->id = pdev->id; - ippdrv = &ctx->ippdrv; - ippdrv->dev = dev; - ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &gsc_src_ops; - ippdrv->ops[EXYNOS_DRM_OPS_DST] = &gsc_dst_ops; - ippdrv->check_property = gsc_ippdrv_check_property; - ippdrv->reset = gsc_ippdrv_reset; - ippdrv->start = gsc_ippdrv_start; - ippdrv->stop = gsc_ippdrv_stop; - ret = gsc_init_prop_list(ippdrv); - if (ret < 0) { - dev_err(dev, "failed to init property list.\n"); - return ret; - } - - DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); - - mutex_init(&ctx->lock); platform_set_drvdata(pdev, ctx); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, GSC_AUTOSUSPEND_DELAY); pm_runtime_enable(dev); - ret = exynos_drm_ippdrv_register(ippdrv); - if (ret < 0) { - dev_err(dev, "failed to register drm gsc device.\n"); - goto err_ippdrv_register; - } + ret = component_add(dev, &gsc_component_ops); + if (ret) + goto err_pm_dis; dev_info(dev, "drm gsc registered successfully.\n"); return 0; -err_ippdrv_register: +err_pm_dis: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); return ret; } @@ -1748,13 +1279,8 @@ static int gsc_probe(struct platform_device *pdev) static int gsc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct gsc_context *ctx = get_gsc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - exynos_drm_ippdrv_unregister(ippdrv); - mutex_destroy(&ctx->lock); - - pm_runtime_set_suspended(dev); + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); return 0; @@ -1763,19 +1289,32 @@ static int gsc_remove(struct platform_device *pdev) static int __maybe_unused gsc_runtime_suspend(struct 
device *dev) { struct gsc_context *ctx = get_gsc_context(dev); + int i; DRM_DEBUG_KMS("id[%d]\n", ctx->id); - return gsc_clk_ctrl(ctx, false); + for (i = ctx->num_clocks - 1; i >= 0; i--) + clk_disable_unprepare(ctx->clocks[i]); + + return 0; } static int __maybe_unused gsc_runtime_resume(struct device *dev) { struct gsc_context *ctx = get_gsc_context(dev); + int i, ret; DRM_DEBUG_KMS("id[%d]\n", ctx->id); - return gsc_clk_ctrl(ctx, true); + for (i = 0; i < ctx->num_clocks; i++) { + ret = clk_prepare_enable(ctx->clocks[i]); + if (ret) { + while (--i > 0) + clk_disable_unprepare(ctx->clocks[i]); + return ret; + } + } + return 0; } static const struct dev_pm_ops gsc_pm_ops = { @@ -1784,9 +1323,66 @@ static const struct dev_pm_ops gsc_pm_ops = { SET_RUNTIME_PM_OPS(gsc_runtime_suspend, gsc_runtime_resume, NULL) }; +static const struct drm_exynos_ipp_limit gsc_5250_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) }, + { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) }, + { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2048 }, .v = { 16, 2048 }) }, + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, + .v = { (1 << 16) / 16, (1 << 16) * 8 }) }, +}; + +static const struct drm_exynos_ipp_limit gsc_5420_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 4800, 8 }, .v = { 16, 3344, 8 }) }, + { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 2 }, .v = { 8, 3344, 2 }) }, + { IPP_SIZE_LIMIT(ROTATED, .h = { 16, 2016 }, .v = { 8, 2016 }) }, + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, + .v = { (1 << 16) / 16, (1 << 16) * 8 }) }, +}; + +static const struct drm_exynos_ipp_limit gsc_5433_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) }, + { IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) }, + { IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) }, + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 }, + .v = { (1 << 16) / 16, (1 << 16) * 8 }) }, +}; + +static struct gsc_driverdata gsc_exynos5250_drvdata = { + .clk_names = {"gscl"}, + .num_clocks = 1, + .limits = gsc_5250_limits, + .num_limits = ARRAY_SIZE(gsc_5250_limits), +}; + +static struct gsc_driverdata gsc_exynos5420_drvdata = { + .clk_names = {"gscl"}, + .num_clocks = 1, + .limits = gsc_5420_limits, + .num_limits = ARRAY_SIZE(gsc_5420_limits), +}; + +static struct gsc_driverdata gsc_exynos5433_drvdata = { + .clk_names = {"pclk", "aclk", "aclk_xiu", "aclk_gsclbend"}, + .num_clocks = 4, + .limits = gsc_5433_limits, + .num_limits = ARRAY_SIZE(gsc_5433_limits), +}; + static const struct of_device_id exynos_drm_gsc_of_match[] = { - { .compatible = "samsung,exynos5-gsc" }, - { }, + { + .compatible = "samsung,exynos5-gsc", + .data = &gsc_exynos5250_drvdata, + }, { + .compatible = "samsung,exynos5250-gsc", + .data = &gsc_exynos5250_drvdata, + }, { + .compatible = "samsung,exynos5420-gsc", + .data = &gsc_exynos5420_drvdata, + }, { + .compatible = "samsung,exynos5433-gsc", + .data = &gsc_exynos5433_drvdata, + }, { + }, }; MODULE_DEVICE_TABLE(of, exynos_drm_gsc_of_match); @@ -1800,4 +1396,3 @@ struct platform_driver gsc_driver = { .of_match_table = of_match_ptr(exynos_drm_gsc_of_match), }, }; - diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.h b/drivers/gpu/drm/exynos/exynos_drm_gsc.h deleted file mode 100644 index 29ec1c5efcf2..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.h +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright (c) 2012 Samsung Electronics Co., Ltd. 
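Note on the gsc_runtime_resume() hunk above: it prepares each clock named in the driver data in order and unwinds on failure. One detail worth flagging is that with "while (--i > 0)" the unwind never reaches clocks[0]; the usual kernel idiom is "--i >= 0". A minimal sketch of that enable-with-rollback pattern, reusing the ctx->clocks[]/ctx->num_clocks fields added by this patch (the helper name gsc_enable_clocks is hypothetical):

	/* Sketch only: enable all clocks, releasing every clock already
	 * enabled (including index 0) if a later clk_prepare_enable() fails. */
	static int gsc_enable_clocks(struct gsc_context *ctx)
	{
		int i, ret;

		for (i = 0; i < ctx->num_clocks; i++) {
			ret = clk_prepare_enable(ctx->clocks[i]);
			if (ret) {
				while (--i >= 0)
					clk_disable_unprepare(ctx->clocks[i]);
				return ret;
			}
		}
		return 0;
	}

As an aside on the limit tables above, the IPP_SCALE_LIMIT entries read naturally as 16.16 fixed point, so (1 << 16) / 16 .. (1 << 16) * 8 corresponds to a 1/16x to 8x scaling range.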
- * - * Authors: - * Eunchul Kim - * Jinyoung Jeon - * Sangmin Lee - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef _EXYNOS_DRM_GSC_H_ -#define _EXYNOS_DRM_GSC_H_ - -/* - * TODO - * FIMD output interface notifier callback. - * Mixer output interface notifier callback. - */ - -#endif /* _EXYNOS_DRM_GSC_H_ */ From 7a2d5c77c55847f31945e5aa8337db2218a5a7c1 Mon Sep 17 00:00:00 2001 From: Marek Szyprowski Date: Thu, 10 May 2018 08:52:12 +0900 Subject: [PATCH 15/17] drm/exynos: fimc: Convert driver to IPP v2 core API This patch adapts Exynos DRM FIMC driver to new IPP v2 core API. The side effect of this conversion is a switch to driver component API to register properly in the Exynos DRM core. Signed-off-by: Marek Szyprowski Merge conflict so merged manually. Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/Kconfig | 2 +- drivers/gpu/drm/exynos/exynos_drm_drv.c | 8 +- drivers/gpu/drm/exynos/exynos_drm_drv.h | 9 + drivers/gpu/drm/exynos/exynos_drm_fimc.c | 1088 +++++++--------------- drivers/gpu/drm/exynos/exynos_drm_fimc.h | 23 - 5 files changed, 370 insertions(+), 760 deletions(-) delete mode 100644 drivers/gpu/drm/exynos/exynos_drm_fimc.h diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 5c216548ea18..54f5703b37e8 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -100,7 +100,7 @@ config DRM_EXYNOS_IPP config DRM_EXYNOS_FIMC bool "FIMC" - depends on BROKEN && MFD_SYSCON + select DRM_EXYNOS_IPP help Choose this option if you want to use Exynos FIMC for DRM. diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 2dcb94034716..7ba13c122d14 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -222,6 +222,7 @@ struct exynos_drm_driver_info { #define DRM_COMPONENT_DRIVER BIT(0) /* supports component framework */ #define DRM_VIRTUAL_DEVICE BIT(1) /* create virtual platform device */ #define DRM_DMA_DEVICE BIT(2) /* can be used for dma allocations */ +#define DRM_FIMC_DEVICE BIT(3) /* devices shared with V4L2 subsystem */ #define DRV_PTR(drv, cond) (IS_ENABLED(cond) ? 
&drv : NULL) @@ -261,6 +262,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = { DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D), }, { DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC), + DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE, }, { DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR), DRM_COMPONENT_DRIVER @@ -294,7 +296,11 @@ static struct component_match *exynos_drm_match_add(struct device *dev) &info->driver->driver, (void *)platform_bus_type.match))) { put_device(p); - component_match_add(dev, &match, compare_dev, d); + + if (!(info->flags & DRM_FIMC_DEVICE) || + exynos_drm_check_fimc_device(d) == 0) + component_match_add(dev, &match, + compare_dev, d); p = d; } put_device(p); diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index df2262f70d91..0834e7e28c99 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -273,6 +273,15 @@ static inline int exynos_dpi_bind(struct drm_device *dev, } #endif +#ifdef CONFIG_DRM_EXYNOS_FIMC +int exynos_drm_check_fimc_device(struct device *dev); +#else +static inline int exynos_drm_check_fimc_device(struct device *dev) +{ + return 0; +} +#endif + int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state, bool nonblock); int exynos_atomic_check(struct drm_device *dev, struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 5b18b5c5fdf2..4dfbfc7f3b84 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c @@ -12,6 +12,7 @@ * */ #include +#include #include #include #include @@ -24,8 +25,8 @@ #include #include "regs-fimc.h" #include "exynos_drm_drv.h" +#include "exynos_drm_iommu.h" #include "exynos_drm_ipp.h" -#include "exynos_drm_fimc.h" /* * FIMC stands for Fully Interactive Mobile Camera and @@ -33,23 +34,6 @@ * input DMA reads image data from the memory. * output DMA writes image data to memory. * FIMC supports image rotation and image effect functions. - * - * M2M operation : supports crop/scale/rotation/csc so on. - * Memory ----> FIMC H/W ----> Memory. - * Writeback operation : supports cloned screen with FIMD. - * FIMD ----> FIMC H/W ----> Memory. - * Output operation : supports direct display using local path. - * Memory ----> FIMC H/W ----> FIMD. - */ - -/* - * TODO - * 1. check suspend/resume api if needed. - * 2. need to check use case platform_device_id. - * 3. check src/dst size with, height. - * 4. added check_prepare api for right register. - * 5. need to add supported list in prop_list. - * 6. check prescaler/scaler optimization. 
*/ #define FIMC_MAX_DEVS 4 @@ -59,29 +43,19 @@ #define FIMC_BUF_STOP 1 #define FIMC_BUF_START 2 #define FIMC_WIDTH_ITU_709 1280 -#define FIMC_REFRESH_MAX 60 -#define FIMC_REFRESH_MIN 12 -#define FIMC_CROP_MAX 8192 -#define FIMC_CROP_MIN 32 -#define FIMC_SCALE_MAX 4224 -#define FIMC_SCALE_MIN 32 +#define FIMC_AUTOSUSPEND_DELAY 2000 + +static unsigned int fimc_mask = 0xc; +module_param_named(fimc_devs, fimc_mask, uint, 0644); +MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM"); #define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev)) -#define get_ctx_from_ippdrv(ippdrv) container_of(ippdrv,\ - struct fimc_context, ippdrv); -enum fimc_wb { - FIMC_WB_NONE, - FIMC_WB_A, - FIMC_WB_B, -}; enum { FIMC_CLK_LCLK, FIMC_CLK_GATE, FIMC_CLK_WB_A, FIMC_CLK_WB_B, - FIMC_CLK_MUX, - FIMC_CLK_PARENT, FIMC_CLKS_MAX }; @@ -90,12 +64,8 @@ static const char * const fimc_clock_names[] = { [FIMC_CLK_GATE] = "fimc", [FIMC_CLK_WB_A] = "pxl_async0", [FIMC_CLK_WB_B] = "pxl_async1", - [FIMC_CLK_MUX] = "mux", - [FIMC_CLK_PARENT] = "parent", }; -#define FIMC_DEFAULT_LCLK_FREQUENCY 133000000UL - /* * A structure of scaler. * @@ -107,7 +77,7 @@ static const char * const fimc_clock_names[] = { * @vratio: vertical ratio. */ struct fimc_scaler { - bool range; + bool range; bool bypass; bool up_h; bool up_v; @@ -115,57 +85,33 @@ struct fimc_scaler { u32 vratio; }; -/* - * A structure of scaler capability. - * - * find user manual table 43-1. - * @in_hori: scaler input horizontal size. - * @bypass: scaler bypass mode. - * @dst_h_wo_rot: target horizontal size without output rotation. - * @dst_h_rot: target horizontal size with output rotation. - * @rl_w_wo_rot: real width without input rotation. - * @rl_h_rot: real height without output rotation. - */ -struct fimc_capability { - /* scaler */ - u32 in_hori; - u32 bypass; - /* output rotator */ - u32 dst_h_wo_rot; - u32 dst_h_rot; - /* input rotator */ - u32 rl_w_wo_rot; - u32 rl_h_rot; -}; - /* * A structure of fimc context. * - * @ippdrv: prepare initialization using ippdrv. * @regs_res: register resources. * @regs: memory mapped io registers. * @lock: locking of operations. * @clocks: fimc clocks. - * @clk_frequency: LCLK clock frequency. - * @sysreg: handle to SYSREG block regmap. * @sc: scaler infomations. * @pol: porarity of writeback. * @id: fimc id. * @irq: irq number. - * @suspended: qos operations. 
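Note on the fimc_devs module parameter introduced above: it is a bit mask deciding which FIMC instances are claimed by Exynos DRM rather than left to the V4L2 capture driver, with the default 0xc keeping FIMC 2 and 3 for DRM. Its consumer, exynos_drm_check_fimc_device(), is only declared elsewhere in this series, so the check below is a hypothetical sketch of the presumed semantics (a set bit assigns that instance to DRM; the helper name is made up):

	/* Hypothetical sketch: true if FIMC instance 'id' should bind to DRM. */
	static bool fimc_assigned_to_drm(unsigned int id)
	{
		return fimc_mask & BIT(id);
	}

Under that reading, the default fimc_devs=0xc selects FIMC 2 and 3 only, while booting with exynosdrm.fimc_devs=0x3 (assuming the usual exynosdrm module name) would hand FIMC 0 and 1 to DRM instead.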
*/ struct fimc_context { - struct exynos_drm_ippdrv ippdrv; + struct exynos_drm_ipp ipp; + struct drm_device *drm_dev; + struct device *dev; + struct exynos_drm_ipp_task *task; + struct exynos_drm_ipp_formats *formats; + unsigned int num_formats; + struct resource *regs_res; void __iomem *regs; spinlock_t lock; struct clk *clocks[FIMC_CLKS_MAX]; - u32 clk_frequency; - struct regmap *sysreg; struct fimc_scaler sc; int id; int irq; - bool suspended; }; static u32 fimc_read(struct fimc_context *ctx, u32 reg) @@ -217,19 +163,10 @@ static void fimc_sw_reset(struct fimc_context *ctx) fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ); } -static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) -{ - return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK, - SYSREG_FIMD0WB_DEST_MASK, - ctx->id << SYSREG_FIMD0WB_DEST_SHIFT); -} - -static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) +static void fimc_set_type_ctrl(struct fimc_context *ctx) { u32 cfg; - DRM_DEBUG_KMS("wb[%d]\n", wb); - cfg = fimc_read(ctx, EXYNOS_CIGCTRL); cfg &= ~(EXYNOS_CIGCTRL_TESTPATTERN_MASK | EXYNOS_CIGCTRL_SELCAM_ITU_MASK | @@ -238,23 +175,10 @@ static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) EXYNOS_CIGCTRL_SELWB_CAMIF_MASK | EXYNOS_CIGCTRL_SELWRITEBACK_MASK); - switch (wb) { - case FIMC_WB_A: - cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_A | - EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); - break; - case FIMC_WB_B: - cfg |= (EXYNOS_CIGCTRL_SELWRITEBACK_B | - EXYNOS_CIGCTRL_SELWB_CAMIF_WRITEBACK); - break; - case FIMC_WB_NONE: - default: - cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A | - EXYNOS_CIGCTRL_SELWRITEBACK_A | - EXYNOS_CIGCTRL_SELCAM_MIPI_A | - EXYNOS_CIGCTRL_SELCAM_FIMC_ITU); - break; - } + cfg |= (EXYNOS_CIGCTRL_SELCAM_ITU_A | + EXYNOS_CIGCTRL_SELWRITEBACK_A | + EXYNOS_CIGCTRL_SELCAM_MIPI_A | + EXYNOS_CIGCTRL_SELCAM_FIMC_ITU); fimc_write(ctx, cfg, EXYNOS_CIGCTRL); } @@ -296,7 +220,6 @@ static void fimc_clear_irq(struct fimc_context *ctx) static bool fimc_check_ovf(struct fimc_context *ctx) { - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 status, flag; status = fimc_read(ctx, EXYNOS_CISTATUS); @@ -310,7 +233,7 @@ static bool fimc_check_ovf(struct fimc_context *ctx) EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB | EXYNOS_CIWDOFST_CLROVFICR); - dev_err(ippdrv->dev, "occurred overflow at %d, status 0x%x.\n", + dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n", ctx->id, status); return true; } @@ -376,10 +299,8 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable) fimc_write(ctx, cfg, EXYNOS_CIOCTRL); } - -static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) +static void fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) { - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); @@ -392,12 +313,12 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) case DRM_FORMAT_RGB565: cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB565; fimc_write(ctx, cfg, EXYNOS_CISCCTRL); - return 0; + return; case DRM_FORMAT_RGB888: case DRM_FORMAT_XRGB8888: cfg |= EXYNOS_CISCCTRL_INRGB_FMT_RGB888; fimc_write(ctx, cfg, EXYNOS_CISCCTRL); - return 0; + return; default: /* bypass */ break; @@ -438,20 +359,13 @@ static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) cfg |= (EXYNOS_MSCTRL_ORDER2P_LSB_CBCR | EXYNOS_MSCTRL_C_INT_IN_2PLANE); break; - default: - dev_err(ippdrv->dev, "invalid source yuv order 0x%x.\n", fmt); - return -EINVAL; } fimc_write(ctx, cfg, EXYNOS_MSCTRL); - - return 0; } -static int 
fimc_src_set_fmt(struct device *dev, u32 fmt) +static void fimc_src_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled) { - struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); @@ -485,9 +399,6 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt) case DRM_FORMAT_NV21: cfg |= EXYNOS_MSCTRL_INFORMAT_YCBCR420; break; - default: - dev_err(ippdrv->dev, "invalid source format 0x%x.\n", fmt); - return -EINVAL; } fimc_write(ctx, cfg, EXYNOS_MSCTRL); @@ -495,22 +406,22 @@ static int fimc_src_set_fmt(struct device *dev, u32 fmt) cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); cfg &= ~EXYNOS_CIDMAPARAM_R_MODE_MASK; - cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; + if (tiled) + cfg |= EXYNOS_CIDMAPARAM_R_MODE_64X32; + else + cfg |= EXYNOS_CIDMAPARAM_R_MODE_LINEAR; fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); - return fimc_src_set_fmt_order(ctx, fmt); + fimc_src_set_fmt_order(ctx, fmt); } -static int fimc_src_set_transf(struct device *dev, - enum drm_exynos_degree degree, - enum drm_exynos_flip flip, bool *swap) +static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation) { - struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; + unsigned int degree = rotation & DRM_MODE_ROTATE_MASK; u32 cfg1, cfg2; - DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); + DRM_DEBUG_KMS("rotation[%x]\n", rotation); cfg1 = fimc_read(ctx, EXYNOS_MSCTRL); cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR | @@ -520,61 +431,56 @@ static int fimc_src_set_transf(struct device *dev, cfg2 &= ~EXYNOS_CITRGFMT_INROT90_CLOCKWISE; switch (degree) { - case EXYNOS_DRM_DEGREE_0: - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + case DRM_MODE_ROTATE_0: + if (rotation & DRM_MODE_REFLECT_X) cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; break; - case EXYNOS_DRM_DEGREE_90: + case DRM_MODE_ROTATE_90: cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + if (rotation & DRM_MODE_REFLECT_X) cfg1 |= EXYNOS_MSCTRL_FLIP_X_MIRROR; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg1 |= EXYNOS_MSCTRL_FLIP_Y_MIRROR; break; - case EXYNOS_DRM_DEGREE_180: + case DRM_MODE_ROTATE_180: cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | EXYNOS_MSCTRL_FLIP_Y_MIRROR); - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + if (rotation & DRM_MODE_REFLECT_X) cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; break; - case EXYNOS_DRM_DEGREE_270: + case DRM_MODE_ROTATE_270: cfg1 |= (EXYNOS_MSCTRL_FLIP_X_MIRROR | EXYNOS_MSCTRL_FLIP_Y_MIRROR); cfg2 |= EXYNOS_CITRGFMT_INROT90_CLOCKWISE; - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + if (rotation & DRM_MODE_REFLECT_X) cfg1 &= ~EXYNOS_MSCTRL_FLIP_X_MIRROR; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg1 &= ~EXYNOS_MSCTRL_FLIP_Y_MIRROR; break; - default: - dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); - return -EINVAL; } fimc_write(ctx, cfg1, EXYNOS_MSCTRL); fimc_write(ctx, cfg2, EXYNOS_CITRGFMT); - *swap = (cfg2 & EXYNOS_CITRGFMT_INROT90_CLOCKWISE) ? 
1 : 0; - - return 0; } -static int fimc_set_window(struct fimc_context *ctx, - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +static void fimc_set_window(struct fimc_context *ctx, + struct exynos_drm_ipp_buffer *buf) { u32 cfg, h1, h2, v1, v2; /* cropped image */ - h1 = pos->x; - h2 = sz->hsize - pos->w - pos->x; - v1 = pos->y; - v2 = sz->vsize - pos->h - pos->y; + h1 = buf->rect.x; + h2 = buf->buf.width - buf->rect.w - buf->rect.x; + v1 = buf->rect.y; + v2 = buf->buf.height - buf->rect.h - buf->rect.y; DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n", - pos->x, pos->y, pos->w, pos->h, sz->hsize, sz->vsize); + buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h, + buf->buf.width, buf->buf.height); DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2); /* @@ -592,42 +498,30 @@ static int fimc_set_window(struct fimc_context *ctx, cfg = (EXYNOS_CIWDOFST2_WINHOROFST2(h2) | EXYNOS_CIWDOFST2_WINVEROFST2(v2)); fimc_write(ctx, cfg, EXYNOS_CIWDOFST2); - - return 0; } -static int fimc_src_set_size(struct device *dev, int swap, - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +static void fimc_src_set_size(struct fimc_context *ctx, + struct exynos_drm_ipp_buffer *buf) { - struct fimc_context *ctx = get_fimc_context(dev); - struct drm_exynos_pos img_pos = *pos; - struct drm_exynos_sz img_sz = *sz; u32 cfg; - DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n", - swap, sz->hsize, sz->vsize); + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); /* original size */ - cfg = (EXYNOS_ORGISIZE_HORIZONTAL(img_sz.hsize) | - EXYNOS_ORGISIZE_VERTICAL(img_sz.vsize)); + cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) | + EXYNOS_ORGISIZE_VERTICAL(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_ORGISIZE); - DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); - - if (swap) { - img_pos.w = pos->h; - img_pos.h = pos->w; - img_sz.hsize = sz->vsize; - img_sz.vsize = sz->hsize; - } + DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y, + buf->rect.w, buf->rect.h); /* set input DMA image size */ cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE); cfg &= ~(EXYNOS_CIREAL_ISIZE_HEIGHT_MASK | EXYNOS_CIREAL_ISIZE_WIDTH_MASK); - cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(img_pos.w) | - EXYNOS_CIREAL_ISIZE_HEIGHT(img_pos.h)); + cfg |= (EXYNOS_CIREAL_ISIZE_WIDTH(buf->rect.w) | + EXYNOS_CIREAL_ISIZE_HEIGHT(buf->rect.h)); fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE); /* @@ -635,91 +529,34 @@ static int fimc_src_set_size(struct device *dev, int swap, * for now, we support only ITU601 8 bit mode */ cfg = (EXYNOS_CISRCFMT_ITU601_8BIT | - EXYNOS_CISRCFMT_SOURCEHSIZE(img_sz.hsize) | - EXYNOS_CISRCFMT_SOURCEVSIZE(img_sz.vsize)); + EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) | + EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_CISRCFMT); /* offset Y(RGB), Cb, Cr */ - cfg = (EXYNOS_CIIYOFF_HORIZONTAL(img_pos.x) | - EXYNOS_CIIYOFF_VERTICAL(img_pos.y)); + cfg = (EXYNOS_CIIYOFF_HORIZONTAL(buf->rect.x) | + EXYNOS_CIIYOFF_VERTICAL(buf->rect.y)); fimc_write(ctx, cfg, EXYNOS_CIIYOFF); - cfg = (EXYNOS_CIICBOFF_HORIZONTAL(img_pos.x) | - EXYNOS_CIICBOFF_VERTICAL(img_pos.y)); + cfg = (EXYNOS_CIICBOFF_HORIZONTAL(buf->rect.x) | + EXYNOS_CIICBOFF_VERTICAL(buf->rect.y)); fimc_write(ctx, cfg, EXYNOS_CIICBOFF); - cfg = (EXYNOS_CIICROFF_HORIZONTAL(img_pos.x) | - EXYNOS_CIICROFF_VERTICAL(img_pos.y)); + cfg = (EXYNOS_CIICROFF_HORIZONTAL(buf->rect.x) | + EXYNOS_CIICROFF_VERTICAL(buf->rect.y)); fimc_write(ctx, cfg, EXYNOS_CIICROFF); - return fimc_set_window(ctx, &img_pos, &img_sz); 
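Note on the converted fimc_set_window() above: it now takes its geometry straight from the exynos_drm_ipp_buffer, where h1/v1 are the left/top offsets of the rectangle and h2/v2 the remaining right/bottom margins of the buffer. A minimal sketch of that arithmetic (the helper name fimc_window_margins is hypothetical), followed by a worked example with assumed numbers:

	/* Margin arithmetic mirrored from fimc_set_window() above. */
	static void fimc_window_margins(const struct exynos_drm_ipp_buffer *buf,
					u32 *h1, u32 *h2, u32 *v1, u32 *v2)
	{
		*h1 = buf->rect.x;
		*h2 = buf->buf.width - buf->rect.w - buf->rect.x;
		*v1 = buf->rect.y;
		*v2 = buf->buf.height - buf->rect.h - buf->rect.y;
	}

For a 1920x1080 buffer with a 1280x720 rectangle at (100, 50) this gives h1 = 100, h2 = 1920 - 1280 - 100 = 540, v1 = 50 and v2 = 1080 - 720 - 50 = 310, which is what ends up in the CIWDOFST/CIWDOFST2 registers.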
+ fimc_set_window(ctx, buf); } -static int fimc_src_set_addr(struct device *dev, - struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, - enum drm_exynos_ipp_buf_type buf_type) +static void fimc_src_set_addr(struct fimc_context *ctx, + struct exynos_drm_ipp_buffer *buf) { - struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_property *property; - struct drm_exynos_ipp_config *config; - - if (!c_node) { - DRM_ERROR("failed to get c_node.\n"); - return -EINVAL; - } - - property = &c_node->property; - - DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", - property->prop_id, buf_id, buf_type); - - if (buf_id > FIMC_MAX_SRC) { - dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id); - return -ENOMEM; - } - - /* address register set */ - switch (buf_type) { - case IPP_BUF_ENQUEUE: - config = &property->config[EXYNOS_DRM_OPS_SRC]; - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y], - EXYNOS_CIIYSA0); - - if (config->fmt == DRM_FORMAT_YVU420) { - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], - EXYNOS_CIICBSA0); - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], - EXYNOS_CIICRSA0); - } else { - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], - EXYNOS_CIICBSA0); - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], - EXYNOS_CIICRSA0); - } - break; - case IPP_BUF_DEQUEUE: - fimc_write(ctx, 0x0, EXYNOS_CIIYSA0); - fimc_write(ctx, 0x0, EXYNOS_CIICBSA0); - fimc_write(ctx, 0x0, EXYNOS_CIICRSA0); - break; - default: - /* bypass */ - break; - } - - return 0; + fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIIYSA(0)); + fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIICBSA(0)); + fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIICRSA(0)); } -static struct exynos_drm_ipp_ops fimc_src_ops = { - .set_fmt = fimc_src_set_fmt, - .set_transf = fimc_src_set_transf, - .set_size = fimc_src_set_size, - .set_addr = fimc_src_set_addr, -}; - -static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) +static void fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) { - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); @@ -732,11 +569,11 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) case DRM_FORMAT_RGB565: cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB565; fimc_write(ctx, cfg, EXYNOS_CISCCTRL); - return 0; + return; case DRM_FORMAT_RGB888: cfg |= EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888; fimc_write(ctx, cfg, EXYNOS_CISCCTRL); - return 0; + return; case DRM_FORMAT_XRGB8888: cfg |= (EXYNOS_CISCCTRL_OUTRGB_FMT_RGB888 | EXYNOS_CISCCTRL_EXTRGB_EXTENSION); @@ -784,20 +621,13 @@ static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) cfg |= EXYNOS_CIOCTRL_ORDER2P_LSB_CBCR; cfg |= EXYNOS_CIOCTRL_YCBCR_2PLANE; break; - default: - dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt); - return -EINVAL; } fimc_write(ctx, cfg, EXYNOS_CIOCTRL); - - return 0; } -static int fimc_dst_set_fmt(struct device *dev, u32 fmt) +static void fimc_dst_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled) { - struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg; DRM_DEBUG_KMS("fmt[0x%x]\n", fmt); @@ -837,10 +667,6 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt) case DRM_FORMAT_NV21: cfg |= EXYNOS_CITRGFMT_OUTFORMAT_YCBCR420; break; - default: - dev_err(ippdrv->dev, "invalid target format 0x%x.\n", - fmt); - return -EINVAL; } fimc_write(ctx, cfg, 
EXYNOS_CITRGFMT); @@ -849,73 +675,67 @@ static int fimc_dst_set_fmt(struct device *dev, u32 fmt) cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); cfg &= ~EXYNOS_CIDMAPARAM_W_MODE_MASK; - cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; + if (tiled) + cfg |= EXYNOS_CIDMAPARAM_W_MODE_64X32; + else + cfg |= EXYNOS_CIDMAPARAM_W_MODE_LINEAR; fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); - return fimc_dst_set_fmt_order(ctx, fmt); + fimc_dst_set_fmt_order(ctx, fmt); } -static int fimc_dst_set_transf(struct device *dev, - enum drm_exynos_degree degree, - enum drm_exynos_flip flip, bool *swap) +static void fimc_dst_set_transf(struct fimc_context *ctx, unsigned int rotation) { - struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; + unsigned int degree = rotation & DRM_MODE_ROTATE_MASK; u32 cfg; - DRM_DEBUG_KMS("degree[%d]flip[0x%x]\n", degree, flip); + DRM_DEBUG_KMS("rotation[0x%x]\n", rotation); cfg = fimc_read(ctx, EXYNOS_CITRGFMT); cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK; cfg &= ~EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; switch (degree) { - case EXYNOS_DRM_DEGREE_0: - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + case DRM_MODE_ROTATE_0: + if (rotation & DRM_MODE_REFLECT_X) cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; break; - case EXYNOS_DRM_DEGREE_90: + case DRM_MODE_ROTATE_90: cfg |= EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE; - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + if (rotation & DRM_MODE_REFLECT_X) cfg |= EXYNOS_CITRGFMT_FLIP_X_MIRROR; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg |= EXYNOS_CITRGFMT_FLIP_Y_MIRROR; break; - case EXYNOS_DRM_DEGREE_180: + case DRM_MODE_ROTATE_180: cfg |= (EXYNOS_CITRGFMT_FLIP_X_MIRROR | EXYNOS_CITRGFMT_FLIP_Y_MIRROR); - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + if (rotation & DRM_MODE_REFLECT_X) cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; break; - case EXYNOS_DRM_DEGREE_270: + case DRM_MODE_ROTATE_270: cfg |= (EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE | EXYNOS_CITRGFMT_FLIP_X_MIRROR | EXYNOS_CITRGFMT_FLIP_Y_MIRROR); - if (flip & EXYNOS_DRM_FLIP_VERTICAL) + if (rotation & DRM_MODE_REFLECT_X) cfg &= ~EXYNOS_CITRGFMT_FLIP_X_MIRROR; - if (flip & EXYNOS_DRM_FLIP_HORIZONTAL) + if (rotation & DRM_MODE_REFLECT_Y) cfg &= ~EXYNOS_CITRGFMT_FLIP_Y_MIRROR; break; - default: - dev_err(ippdrv->dev, "invalid degree value %d.\n", degree); - return -EINVAL; } fimc_write(ctx, cfg, EXYNOS_CITRGFMT); - *swap = (cfg & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) ? 
1 : 0; - - return 0; } static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, - struct drm_exynos_pos *src, struct drm_exynos_pos *dst) + struct drm_exynos_ipp_task_rect *src, + struct drm_exynos_ipp_task_rect *dst) { - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; u32 cfg, cfg_ext, shfactor; u32 pre_dst_width, pre_dst_height; u32 hfactor, vfactor; @@ -942,13 +762,13 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, /* fimc_ippdrv_check_property assures that dividers are not null */ hfactor = fls(src_w / dst_w / 2); if (hfactor > FIMC_SHFACTOR / 2) { - dev_err(ippdrv->dev, "failed to get ratio horizontal.\n"); + dev_err(ctx->dev, "failed to get ratio horizontal.\n"); return -EINVAL; } vfactor = fls(src_h / dst_h / 2); if (vfactor > FIMC_SHFACTOR / 2) { - dev_err(ippdrv->dev, "failed to get ratio vertical.\n"); + dev_err(ctx->dev, "failed to get ratio vertical.\n"); return -EINVAL; } @@ -1019,83 +839,77 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc) fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN); } -static int fimc_dst_set_size(struct device *dev, int swap, - struct drm_exynos_pos *pos, struct drm_exynos_sz *sz) +static void fimc_dst_set_size(struct fimc_context *ctx, + struct exynos_drm_ipp_buffer *buf) { - struct fimc_context *ctx = get_fimc_context(dev); - struct drm_exynos_pos img_pos = *pos; - struct drm_exynos_sz img_sz = *sz; - u32 cfg; + u32 cfg, cfg_ext; - DRM_DEBUG_KMS("swap[%d]hsize[%d]vsize[%d]\n", - swap, sz->hsize, sz->vsize); + DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height); /* original size */ - cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(img_sz.hsize) | - EXYNOS_ORGOSIZE_VERTICAL(img_sz.vsize)); + cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) | + EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height)); fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); - DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", pos->x, pos->y, pos->w, pos->h); + DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y, + buf->rect.w, buf->rect.h); /* CSC ITU */ cfg = fimc_read(ctx, EXYNOS_CIGCTRL); cfg &= ~EXYNOS_CIGCTRL_CSC_MASK; - if (sz->hsize >= FIMC_WIDTH_ITU_709) + if (buf->buf.width >= FIMC_WIDTH_ITU_709) cfg |= EXYNOS_CIGCTRL_CSC_ITU709; else cfg |= EXYNOS_CIGCTRL_CSC_ITU601; fimc_write(ctx, cfg, EXYNOS_CIGCTRL); - if (swap) { - img_pos.w = pos->h; - img_pos.h = pos->w; - img_sz.hsize = sz->vsize; - img_sz.vsize = sz->hsize; - } + cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT); /* target image size */ cfg = fimc_read(ctx, EXYNOS_CITRGFMT); cfg &= ~(EXYNOS_CITRGFMT_TARGETH_MASK | EXYNOS_CITRGFMT_TARGETV_MASK); - cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(img_pos.w) | - EXYNOS_CITRGFMT_TARGETVSIZE(img_pos.h)); + if (cfg_ext & EXYNOS_CITRGFMT_OUTROT90_CLOCKWISE) + cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.h) | + EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.w)); + else + cfg |= (EXYNOS_CITRGFMT_TARGETHSIZE(buf->rect.w) | + EXYNOS_CITRGFMT_TARGETVSIZE(buf->rect.h)); fimc_write(ctx, cfg, EXYNOS_CITRGFMT); /* target area */ - cfg = EXYNOS_CITAREA_TARGET_AREA(img_pos.w * img_pos.h); + cfg = EXYNOS_CITAREA_TARGET_AREA(buf->rect.w * buf->rect.h); fimc_write(ctx, cfg, EXYNOS_CITAREA); /* offset Y(RGB), Cb, Cr */ - cfg = (EXYNOS_CIOYOFF_HORIZONTAL(img_pos.x) | - EXYNOS_CIOYOFF_VERTICAL(img_pos.y)); + cfg = (EXYNOS_CIOYOFF_HORIZONTAL(buf->rect.x) | + EXYNOS_CIOYOFF_VERTICAL(buf->rect.y)); fimc_write(ctx, cfg, EXYNOS_CIOYOFF); - cfg = (EXYNOS_CIOCBOFF_HORIZONTAL(img_pos.x) | - EXYNOS_CIOCBOFF_VERTICAL(img_pos.y)); + cfg = 
(EXYNOS_CIOCBOFF_HORIZONTAL(buf->rect.x) | + EXYNOS_CIOCBOFF_VERTICAL(buf->rect.y)); fimc_write(ctx, cfg, EXYNOS_CIOCBOFF); - cfg = (EXYNOS_CIOCROFF_HORIZONTAL(img_pos.x) | - EXYNOS_CIOCROFF_VERTICAL(img_pos.y)); + cfg = (EXYNOS_CIOCROFF_HORIZONTAL(buf->rect.x) | + EXYNOS_CIOCROFF_VERTICAL(buf->rect.y)); fimc_write(ctx, cfg, EXYNOS_CIOCROFF); - - return 0; } static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id, - enum drm_exynos_ipp_buf_type buf_type) + bool enqueue) { unsigned long flags; u32 buf_num; u32 cfg; - DRM_DEBUG_KMS("buf_id[%d]buf_type[%d]\n", buf_id, buf_type); + DRM_DEBUG_KMS("buf_id[%d]enqueu[%d]\n", buf_id, enqueue); spin_lock_irqsave(&ctx->lock, flags); cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ); - if (buf_type == IPP_BUF_ENQUEUE) + if (enqueue) cfg |= (1 << buf_id); else cfg &= ~(1 << buf_id); @@ -1104,88 +918,29 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id, buf_num = hweight32(cfg); - if (buf_type == IPP_BUF_ENQUEUE && buf_num >= FIMC_BUF_START) + if (enqueue && buf_num >= FIMC_BUF_START) fimc_mask_irq(ctx, true); - else if (buf_type == IPP_BUF_DEQUEUE && buf_num <= FIMC_BUF_STOP) + else if (!enqueue && buf_num <= FIMC_BUF_STOP) fimc_mask_irq(ctx, false); spin_unlock_irqrestore(&ctx->lock, flags); } -static int fimc_dst_set_addr(struct device *dev, - struct drm_exynos_ipp_buf_info *buf_info, u32 buf_id, - enum drm_exynos_ipp_buf_type buf_type) +static void fimc_dst_set_addr(struct fimc_context *ctx, + struct exynos_drm_ipp_buffer *buf) { - struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_property *property; - struct drm_exynos_ipp_config *config; + fimc_write(ctx, buf->dma_addr[0], EXYNOS_CIOYSA(0)); + fimc_write(ctx, buf->dma_addr[1], EXYNOS_CIOCBSA(0)); + fimc_write(ctx, buf->dma_addr[2], EXYNOS_CIOCRSA(0)); - if (!c_node) { - DRM_ERROR("failed to get c_node.\n"); - return -EINVAL; - } - - property = &c_node->property; - - DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]buf_type[%d]\n", - property->prop_id, buf_id, buf_type); - - if (buf_id > FIMC_MAX_DST) { - dev_info(ippdrv->dev, "invalid buf_id %d.\n", buf_id); - return -ENOMEM; - } - - /* address register set */ - switch (buf_type) { - case IPP_BUF_ENQUEUE: - config = &property->config[EXYNOS_DRM_OPS_DST]; - - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y], - EXYNOS_CIOYSA(buf_id)); - - if (config->fmt == DRM_FORMAT_YVU420) { - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], - EXYNOS_CIOCBSA(buf_id)); - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], - EXYNOS_CIOCRSA(buf_id)); - } else { - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], - EXYNOS_CIOCBSA(buf_id)); - fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], - EXYNOS_CIOCRSA(buf_id)); - } - break; - case IPP_BUF_DEQUEUE: - fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id)); - fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id)); - fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id)); - break; - default: - /* bypass */ - break; - } - - fimc_dst_set_buf_seq(ctx, buf_id, buf_type); - - return 0; + fimc_dst_set_buf_seq(ctx, 0, true); } -static struct exynos_drm_ipp_ops fimc_dst_ops = { - .set_fmt = fimc_dst_set_fmt, - .set_transf = fimc_dst_set_transf, - .set_size = fimc_dst_set_size, - .set_addr = fimc_dst_set_addr, -}; +static void fimc_stop(struct fimc_context *ctx); static irqreturn_t fimc_irq_handler(int irq, void *dev_id) { struct fimc_context *ctx = dev_id; - struct exynos_drm_ippdrv *ippdrv 
= &ctx->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_event_work *event_work = - c_node->event_work; int buf_id; DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id); @@ -1203,172 +958,21 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id) DRM_DEBUG_KMS("buf_id[%d]\n", buf_id); - fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); + if (ctx->task) { + struct exynos_drm_ipp_task *task = ctx->task; - event_work->ippdrv = ippdrv; - event_work->buf_id[EXYNOS_DRM_OPS_DST] = buf_id; - queue_work(ippdrv->event_workq, &event_work->work); + ctx->task = NULL; + pm_runtime_mark_last_busy(ctx->dev); + pm_runtime_put_autosuspend(ctx->dev); + exynos_drm_ipp_task_done(task, 0); + } + + fimc_dst_set_buf_seq(ctx, buf_id, false); + fimc_stop(ctx); return IRQ_HANDLED; } -static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv) -{ - struct drm_exynos_ipp_prop_list *prop_list = &ippdrv->prop_list; - - prop_list->version = 1; - prop_list->writeback = 1; - prop_list->refresh_min = FIMC_REFRESH_MIN; - prop_list->refresh_max = FIMC_REFRESH_MAX; - prop_list->flip = (1 << EXYNOS_DRM_FLIP_NONE) | - (1 << EXYNOS_DRM_FLIP_VERTICAL) | - (1 << EXYNOS_DRM_FLIP_HORIZONTAL); - prop_list->degree = (1 << EXYNOS_DRM_DEGREE_0) | - (1 << EXYNOS_DRM_DEGREE_90) | - (1 << EXYNOS_DRM_DEGREE_180) | - (1 << EXYNOS_DRM_DEGREE_270); - prop_list->csc = 1; - prop_list->crop = 1; - prop_list->crop_max.hsize = FIMC_CROP_MAX; - prop_list->crop_max.vsize = FIMC_CROP_MAX; - prop_list->crop_min.hsize = FIMC_CROP_MIN; - prop_list->crop_min.vsize = FIMC_CROP_MIN; - prop_list->scale = 1; - prop_list->scale_max.hsize = FIMC_SCALE_MAX; - prop_list->scale_max.vsize = FIMC_SCALE_MAX; - prop_list->scale_min.hsize = FIMC_SCALE_MIN; - prop_list->scale_min.vsize = FIMC_SCALE_MIN; - - return 0; -} - -static inline bool fimc_check_drm_flip(enum drm_exynos_flip flip) -{ - switch (flip) { - case EXYNOS_DRM_FLIP_NONE: - case EXYNOS_DRM_FLIP_VERTICAL: - case EXYNOS_DRM_FLIP_HORIZONTAL: - case EXYNOS_DRM_FLIP_BOTH: - return true; - default: - DRM_DEBUG_KMS("invalid flip\n"); - return false; - } -} - -static int fimc_ippdrv_check_property(struct device *dev, - struct drm_exynos_ipp_property *property) -{ - struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_prop_list *pp = &ippdrv->prop_list; - struct drm_exynos_ipp_config *config; - struct drm_exynos_pos *pos; - struct drm_exynos_sz *sz; - bool swap; - int i; - - for_each_ipp_ops(i) { - if ((i == EXYNOS_DRM_OPS_SRC) && - (property->cmd == IPP_CMD_WB)) - continue; - - config = &property->config[i]; - pos = &config->pos; - sz = &config->sz; - - /* check for flip */ - if (!fimc_check_drm_flip(config->flip)) { - DRM_ERROR("invalid flip.\n"); - goto err_property; - } - - /* check for degree */ - switch (config->degree) { - case EXYNOS_DRM_DEGREE_90: - case EXYNOS_DRM_DEGREE_270: - swap = true; - break; - case EXYNOS_DRM_DEGREE_0: - case EXYNOS_DRM_DEGREE_180: - swap = false; - break; - default: - DRM_ERROR("invalid degree.\n"); - goto err_property; - } - - /* check for buffer bound */ - if ((pos->x + pos->w > sz->hsize) || - (pos->y + pos->h > sz->vsize)) { - DRM_ERROR("out of buf bound.\n"); - goto err_property; - } - - /* check for crop */ - if ((i == EXYNOS_DRM_OPS_SRC) && (pp->crop)) { - if (swap) { - if ((pos->h < pp->crop_min.hsize) || - (sz->vsize > pp->crop_max.hsize) || - (pos->w < pp->crop_min.vsize) || - (sz->hsize > pp->crop_max.vsize)) { - DRM_ERROR("out of crop size.\n"); - goto 
err_property; - } - } else { - if ((pos->w < pp->crop_min.hsize) || - (sz->hsize > pp->crop_max.hsize) || - (pos->h < pp->crop_min.vsize) || - (sz->vsize > pp->crop_max.vsize)) { - DRM_ERROR("out of crop size.\n"); - goto err_property; - } - } - } - - /* check for scale */ - if ((i == EXYNOS_DRM_OPS_DST) && (pp->scale)) { - if (swap) { - if ((pos->h < pp->scale_min.hsize) || - (sz->vsize > pp->scale_max.hsize) || - (pos->w < pp->scale_min.vsize) || - (sz->hsize > pp->scale_max.vsize)) { - DRM_ERROR("out of scale size.\n"); - goto err_property; - } - } else { - if ((pos->w < pp->scale_min.hsize) || - (sz->hsize > pp->scale_max.hsize) || - (pos->h < pp->scale_min.vsize) || - (sz->vsize > pp->scale_max.vsize)) { - DRM_ERROR("out of scale size.\n"); - goto err_property; - } - } - } - } - - return 0; - -err_property: - for_each_ipp_ops(i) { - if ((i == EXYNOS_DRM_OPS_SRC) && - (property->cmd == IPP_CMD_WB)) - continue; - - config = &property->config[i]; - pos = &config->pos; - sz = &config->sz; - - DRM_ERROR("[%s]f[%d]r[%d]pos[%d %d %d %d]sz[%d %d]\n", - i ? "dst" : "src", config->flip, config->degree, - pos->x, pos->y, pos->w, pos->h, - sz->hsize, sz->vsize); - } - - return -EINVAL; -} - static void fimc_clear_addr(struct fimc_context *ctx) { int i; @@ -1386,10 +990,8 @@ static void fimc_clear_addr(struct fimc_context *ctx) } } -static int fimc_ippdrv_reset(struct device *dev) +static void fimc_reset(struct fimc_context *ctx) { - struct fimc_context *ctx = get_fimc_context(dev); - /* reset h/w block */ fimc_sw_reset(ctx); @@ -1397,82 +999,26 @@ static int fimc_ippdrv_reset(struct device *dev) memset(&ctx->sc, 0x0, sizeof(ctx->sc)); fimc_clear_addr(ctx); - - return 0; } -static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) +static void fimc_start(struct fimc_context *ctx) { - struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - struct drm_exynos_ipp_cmd_node *c_node = ippdrv->c_node; - struct drm_exynos_ipp_property *property; - struct drm_exynos_ipp_config *config; - struct drm_exynos_pos img_pos[EXYNOS_DRM_OPS_MAX]; - struct drm_exynos_ipp_set_wb set_wb; - int ret, i; u32 cfg0, cfg1; - DRM_DEBUG_KMS("cmd[%d]\n", cmd); - - if (!c_node) { - DRM_ERROR("failed to get c_node.\n"); - return -EINVAL; - } - - property = &c_node->property; - fimc_mask_irq(ctx, true); - for_each_ipp_ops(i) { - config = &property->config[i]; - img_pos[i] = config->pos; - } - - ret = fimc_set_prescaler(ctx, &ctx->sc, - &img_pos[EXYNOS_DRM_OPS_SRC], - &img_pos[EXYNOS_DRM_OPS_DST]); - if (ret) { - dev_err(dev, "failed to set prescaler.\n"); - return ret; - } - - /* If set ture, we can save jpeg about screen */ + /* If set true, we can save jpeg about screen */ fimc_handle_jpeg(ctx, false); fimc_set_scaler(ctx, &ctx->sc); - switch (cmd) { - case IPP_CMD_M2M: - fimc_set_type_ctrl(ctx, FIMC_WB_NONE); - fimc_handle_lastend(ctx, false); + fimc_set_type_ctrl(ctx); + fimc_handle_lastend(ctx, false); - /* setup dma */ - cfg0 = fimc_read(ctx, EXYNOS_MSCTRL); - cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK; - cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY; - fimc_write(ctx, cfg0, EXYNOS_MSCTRL); - break; - case IPP_CMD_WB: - fimc_set_type_ctrl(ctx, FIMC_WB_A); - fimc_handle_lastend(ctx, true); - - /* setup FIMD */ - ret = fimc_set_camblk_fimd0_wb(ctx); - if (ret < 0) { - dev_err(dev, "camblk setup failed.\n"); - return ret; - } - - set_wb.enable = 1; - set_wb.refresh = property->refresh_rate; - exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); - break; - case 
IPP_CMD_OUTPUT: - default: - ret = -EINVAL; - dev_err(dev, "invalid operations.\n"); - return ret; - } + /* setup dma */ + cfg0 = fimc_read(ctx, EXYNOS_MSCTRL); + cfg0 &= ~EXYNOS_MSCTRL_INPUT_MASK; + cfg0 |= EXYNOS_MSCTRL_INPUT_MEMORY; + fimc_write(ctx, cfg0, EXYNOS_MSCTRL); /* Reset status */ fimc_write(ctx, 0x0, EXYNOS_CISTATUS); @@ -1498,36 +1044,18 @@ static int fimc_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd) fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK); - if (cmd == IPP_CMD_M2M) - fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); - - return 0; + fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); } -static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) +static void fimc_stop(struct fimc_context *ctx) { - struct fimc_context *ctx = get_fimc_context(dev); - struct drm_exynos_ipp_set_wb set_wb = {0, 0}; u32 cfg; - DRM_DEBUG_KMS("cmd[%d]\n", cmd); - - switch (cmd) { - case IPP_CMD_M2M: - /* Source clear */ - cfg = fimc_read(ctx, EXYNOS_MSCTRL); - cfg &= ~EXYNOS_MSCTRL_INPUT_MASK; - cfg &= ~EXYNOS_MSCTRL_ENVID; - fimc_write(ctx, cfg, EXYNOS_MSCTRL); - break; - case IPP_CMD_WB: - exynos_drm_ippnb_send_event(IPP_SET_WRITEBACK, (void *)&set_wb); - break; - case IPP_CMD_OUTPUT: - default: - dev_err(dev, "invalid operations.\n"); - break; - } + /* Source clear */ + cfg = fimc_read(ctx, EXYNOS_MSCTRL); + cfg &= ~EXYNOS_MSCTRL_INPUT_MASK; + cfg &= ~EXYNOS_MSCTRL_ENVID; + fimc_write(ctx, cfg, EXYNOS_MSCTRL); fimc_mask_irq(ctx, false); @@ -1545,6 +1073,87 @@ static void fimc_ippdrv_stop(struct device *dev, enum drm_exynos_ipp_cmd cmd) fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE); } +static int fimc_commit(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) +{ + struct fimc_context *ctx = + container_of(ipp, struct fimc_context, ipp); + + pm_runtime_get_sync(ctx->dev); + ctx->task = task; + + fimc_src_set_fmt(ctx, task->src.buf.fourcc, task->src.buf.modifier); + fimc_src_set_size(ctx, &task->src); + fimc_src_set_transf(ctx, DRM_MODE_ROTATE_0); + fimc_src_set_addr(ctx, &task->src); + fimc_dst_set_fmt(ctx, task->dst.buf.fourcc, task->dst.buf.modifier); + fimc_dst_set_transf(ctx, task->transform.rotation); + fimc_dst_set_size(ctx, &task->dst); + fimc_dst_set_addr(ctx, &task->dst); + fimc_set_prescaler(ctx, &ctx->sc, &task->src.rect, &task->dst.rect); + fimc_start(ctx); + + return 0; +} + +static void fimc_abort(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) +{ + struct fimc_context *ctx = + container_of(ipp, struct fimc_context, ipp); + + fimc_reset(ctx); + + if (ctx->task) { + struct exynos_drm_ipp_task *task = ctx->task; + + ctx->task = NULL; + pm_runtime_mark_last_busy(ctx->dev); + pm_runtime_put_autosuspend(ctx->dev); + exynos_drm_ipp_task_done(task, -EIO); + } +} + +static struct exynos_drm_ipp_funcs ipp_funcs = { + .commit = fimc_commit, + .abort = fimc_abort, +}; + +static int fimc_bind(struct device *dev, struct device *master, void *data) +{ + struct fimc_context *ctx = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_ipp *ipp = &ctx->ipp; + + ctx->drm_dev = drm_dev; + drm_iommu_attach_device(drm_dev, dev); + + exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs, + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | + DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT, + ctx->formats, ctx->num_formats, "fimc"); + + dev_info(dev, "The exynos fimc has been probed successfully\n"); + + return 0; +} + +static void fimc_unbind(struct device *dev, 
struct device *master, + void *data) +{ + struct fimc_context *ctx = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_ipp *ipp = &ctx->ipp; + + exynos_drm_ipp_unregister(drm_dev, ipp); + drm_iommu_detach_device(drm_dev, dev); +} + +static const struct component_ops fimc_component_ops = { + .bind = fimc_bind, + .unbind = fimc_unbind, +}; + static void fimc_put_clocks(struct fimc_context *ctx) { int i; @@ -1559,7 +1168,7 @@ static void fimc_put_clocks(struct fimc_context *ctx) static int fimc_setup_clocks(struct fimc_context *ctx) { - struct device *fimc_dev = ctx->ippdrv.dev; + struct device *fimc_dev = ctx->dev; struct device *dev; int ret, i; @@ -1574,8 +1183,6 @@ static int fimc_setup_clocks(struct fimc_context *ctx) ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]); if (IS_ERR(ctx->clocks[i])) { - if (i >= FIMC_CLK_MUX) - break; ret = PTR_ERR(ctx->clocks[i]); dev_err(fimc_dev, "failed to get clock: %s\n", fimc_clock_names[i]); @@ -1583,20 +1190,6 @@ static int fimc_setup_clocks(struct fimc_context *ctx) } } - /* Optional FIMC LCLK parent clock setting */ - if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) { - ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX], - ctx->clocks[FIMC_CLK_PARENT]); - if (ret < 0) { - dev_err(fimc_dev, "failed to set parent.\n"); - goto e_clk_free; - } - } - - ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency); - if (ret < 0) - goto e_clk_free; - ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]); if (!ret) return ret; @@ -1605,57 +1198,118 @@ static int fimc_setup_clocks(struct fimc_context *ctx) return ret; } -static int fimc_parse_dt(struct fimc_context *ctx) +int exynos_drm_check_fimc_device(struct device *dev) { - struct device_node *node = ctx->ippdrv.dev->of_node; + unsigned int id = of_alias_get_id(dev->of_node, "fimc"); - /* Handle only devices that support the LCD Writeback data path */ - if (!of_property_read_bool(node, "samsung,lcd-wb")) - return -ENODEV; - - if (of_property_read_u32(node, "clock-frequency", - &ctx->clk_frequency)) - ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY; - - ctx->id = of_alias_get_id(node, "fimc"); - - if (ctx->id < 0) { - dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n"); - return -EINVAL; - } - - return 0; + if (id >= 0 && (BIT(id) & fimc_mask)) + return 0; + return -ENODEV; } +static const unsigned int fimc_formats[] = { + DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, + DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV21, DRM_FORMAT_NV61, + DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, + DRM_FORMAT_YUV420, DRM_FORMAT_YVU420, DRM_FORMAT_YUV422, + DRM_FORMAT_YUV444, +}; + +static const unsigned int fimc_tiled_formats[] = { + DRM_FORMAT_NV12, DRM_FORMAT_NV21, +}; + +static const struct drm_exynos_ipp_limit fimc_4210_limits_v1[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) }, + { IPP_SIZE_LIMIT(AREA, .h = { 16, 4224, 2 }, .v = { 16, 0, 2 }) }, + { IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1920 }, .v = { 128, 0 }) }, + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, + .v = { (1 << 16) / 64, (1 << 16) * 64 }) }, +}; + +static const struct drm_exynos_ipp_limit fimc_4210_limits_v2[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, 8192, 8 }, .v = { 16, 8192, 2 }) }, + { IPP_SIZE_LIMIT(AREA, .h = { 16, 1920, 2 }, .v = { 16, 0, 2 }) }, + { IPP_SIZE_LIMIT(ROTATED, .h = { 128, 1366 }, .v = { 128, 0 }) }, + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, + .v = { (1 << 16) / 64, (1 << 16) * 64 }) }, +}; + +static const struct 
drm_exynos_ipp_limit fimc_4210_limits_tiled_v1[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) }, + { IPP_SIZE_LIMIT(AREA, .h = { 128, 1920, 2 }, .v = { 128, 0, 2 }) }, + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, + .v = { (1 << 16) / 64, (1 << 16) * 64 }) }, +}; + +static const struct drm_exynos_ipp_limit fimc_4210_limits_tiled_v2[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 128, 1920, 128 }, .v = { 32, 1920, 32 }) }, + { IPP_SIZE_LIMIT(AREA, .h = { 128, 1366, 2 }, .v = { 128, 0, 2 }) }, + { IPP_SCALE_LIMIT(.h = { (1 << 16) / 64, (1 << 16) * 64 }, + .v = { (1 << 16) / 64, (1 << 16) * 64 }) }, +}; + static int fimc_probe(struct platform_device *pdev) { + const struct drm_exynos_ipp_limit *limits; + struct exynos_drm_ipp_formats *formats; struct device *dev = &pdev->dev; struct fimc_context *ctx; struct resource *res; - struct exynos_drm_ippdrv *ippdrv; int ret; + int i, j, num_limits, num_formats; - if (!dev->of_node) { - dev_err(dev, "device tree node not found.\n"); + if (exynos_drm_check_fimc_device(dev) != 0) return -ENODEV; - } ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; - ctx->ippdrv.dev = dev; + ctx->dev = dev; + ctx->id = of_alias_get_id(dev->of_node, "fimc"); - ret = fimc_parse_dt(ctx); - if (ret < 0) - return ret; + /* construct formats/limits array */ + num_formats = ARRAY_SIZE(fimc_formats) + ARRAY_SIZE(fimc_tiled_formats); + formats = devm_kzalloc(dev, sizeof(*formats) * num_formats, GFP_KERNEL); + if (!formats) + return -ENOMEM; - ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, - "samsung,sysreg"); - if (IS_ERR(ctx->sysreg)) { - dev_err(dev, "syscon regmap lookup failed.\n"); - return PTR_ERR(ctx->sysreg); + /* linear formats */ + if (ctx->id < 3) { + limits = fimc_4210_limits_v1; + num_limits = ARRAY_SIZE(fimc_4210_limits_v1); + } else { + limits = fimc_4210_limits_v2; + num_limits = ARRAY_SIZE(fimc_4210_limits_v2); } + for (i = 0; i < ARRAY_SIZE(fimc_formats); i++) { + formats[i].fourcc = fimc_formats[i]; + formats[i].type = DRM_EXYNOS_IPP_FORMAT_SOURCE | + DRM_EXYNOS_IPP_FORMAT_DESTINATION; + formats[i].limits = limits; + formats[i].num_limits = num_limits; + } + + /* tiled formats */ + if (ctx->id < 3) { + limits = fimc_4210_limits_tiled_v1; + num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v1); + } else { + limits = fimc_4210_limits_tiled_v2; + num_limits = ARRAY_SIZE(fimc_4210_limits_tiled_v2); + } + for (j = i, i = 0; i < ARRAY_SIZE(fimc_tiled_formats); j++, i++) { + formats[j].fourcc = fimc_tiled_formats[i]; + formats[j].modifier = DRM_FORMAT_MOD_SAMSUNG_64_32_TILE; + formats[j].type = DRM_EXYNOS_IPP_FORMAT_SOURCE | + DRM_EXYNOS_IPP_FORMAT_DESTINATION; + formats[j].limits = limits; + formats[j].num_limits = num_limits; + } + + ctx->formats = formats; + ctx->num_formats = num_formats; /* resource memory */ ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1670,9 +1324,8 @@ static int fimc_probe(struct platform_device *pdev) return -ENOENT; } - ctx->irq = res->start; - ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler, - IRQF_ONESHOT, "drm_fimc", ctx); + ret = devm_request_irq(dev, res->start, fimc_irq_handler, + 0, dev_name(dev), ctx); if (ret < 0) { dev_err(dev, "failed to request irq.\n"); return ret; @@ -1682,39 +1335,24 @@ static int fimc_probe(struct platform_device *pdev) if (ret < 0) return ret; - ippdrv = &ctx->ippdrv; - ippdrv->ops[EXYNOS_DRM_OPS_SRC] = &fimc_src_ops; - ippdrv->ops[EXYNOS_DRM_OPS_DST] = &fimc_dst_ops; - 
ippdrv->check_property = fimc_ippdrv_check_property; - ippdrv->reset = fimc_ippdrv_reset; - ippdrv->start = fimc_ippdrv_start; - ippdrv->stop = fimc_ippdrv_stop; - ret = fimc_init_prop_list(ippdrv); - if (ret < 0) { - dev_err(dev, "failed to init property list.\n"); - goto err_put_clk; - } - - DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); - spin_lock_init(&ctx->lock); platform_set_drvdata(pdev, ctx); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, FIMC_AUTOSUSPEND_DELAY); pm_runtime_enable(dev); - ret = exynos_drm_ippdrv_register(ippdrv); - if (ret < 0) { - dev_err(dev, "failed to register drm fimc device.\n"); + ret = component_add(dev, &fimc_component_ops); + if (ret) goto err_pm_dis; - } dev_info(dev, "drm fimc registered successfully.\n"); return 0; err_pm_dis: + pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); -err_put_clk: fimc_put_clocks(ctx); return ret; @@ -1724,42 +1362,24 @@ static int fimc_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct fimc_context *ctx = get_fimc_context(dev); - struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; - exynos_drm_ippdrv_unregister(ippdrv); + component_del(dev, &fimc_component_ops); + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); fimc_put_clocks(ctx); - pm_runtime_set_suspended(dev); - pm_runtime_disable(dev); return 0; } #ifdef CONFIG_PM -static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) -{ - DRM_DEBUG_KMS("enable[%d]\n", enable); - - if (enable) { - clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); - clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = false; - } else { - clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); - clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); - ctx->suspended = true; - } - - return 0; -} - static int fimc_runtime_suspend(struct device *dev) { struct fimc_context *ctx = get_fimc_context(dev); DRM_DEBUG_KMS("id[%d]\n", ctx->id); - - return fimc_clk_ctrl(ctx, false); + clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); + return 0; } static int fimc_runtime_resume(struct device *dev) @@ -1767,8 +1387,7 @@ static int fimc_runtime_resume(struct device *dev) struct fimc_context *ctx = get_fimc_context(dev); DRM_DEBUG_KMS("id[%d]\n", ctx->id); - - return fimc_clk_ctrl(ctx, true); + return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); } #endif @@ -1795,4 +1414,3 @@ struct platform_driver fimc_driver = { .pm = &fimc_pm_ops, }, }; - diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.h b/drivers/gpu/drm/exynos/exynos_drm_fimc.h deleted file mode 100644 index 127a424c5fdf..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2012 Samsung Electronics Co., Ltd. - * - * Authors: - * Eunchul Kim - * Jinyoung Jeon - * Sangmin Lee - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#ifndef _EXYNOS_DRM_FIMC_H_ -#define _EXYNOS_DRM_FIMC_H_ - -/* - * TODO - * FIMD output interface notifier callback. 
- */ - -#endif /* _EXYNOS_DRM_FIMC_H_ */ From 01fb9185dc180940f90510215ef8764d6155d088 Mon Sep 17 00:00:00 2001 From: Andrzej Pietrasiewicz Date: Wed, 9 May 2018 10:59:26 +0200 Subject: [PATCH 16/17] drm/exynos: Add driver for Exynos Scaler module Exynos Scaler is a hardware module that processes graphic data fetched from memory and transfers the resultant data to another memory buffer. Graphics data can be up/down-scaled, rotated, flipped, and have its color space converted. Scaler hardware modules are a part of Exynos5420 and newer Exynos SoCs. Signed-off-by: Andrzej Pietrasiewicz Signed-off-by: Marek Szyprowski Acked-by: Rob Herring Signed-off-by: Inki Dae --- .../bindings/gpu/samsung-scaler.txt | 27 + drivers/gpu/drm/exynos/Kconfig | 6 + drivers/gpu/drm/exynos/Makefile | 1 + drivers/gpu/drm/exynos/exynos_drm_drv.c | 3 + drivers/gpu/drm/exynos/exynos_drm_drv.h | 1 + drivers/gpu/drm/exynos/exynos_drm_scaler.c | 694 ++++++++++++++++++ drivers/gpu/drm/exynos/regs-scaler.h | 426 +++++++++++ 7 files changed, 1158 insertions(+) create mode 100644 Documentation/devicetree/bindings/gpu/samsung-scaler.txt create mode 100644 drivers/gpu/drm/exynos/exynos_drm_scaler.c create mode 100644 drivers/gpu/drm/exynos/regs-scaler.h diff --git a/Documentation/devicetree/bindings/gpu/samsung-scaler.txt b/Documentation/devicetree/bindings/gpu/samsung-scaler.txt new file mode 100644 index 000000000000..9c3d98105dfd --- /dev/null +++ b/Documentation/devicetree/bindings/gpu/samsung-scaler.txt @@ -0,0 +1,27 @@ +* Samsung Exynos Image Scaler + +Required properties: + - compatible : value should be one of the following: + (a) "samsung,exynos5420-scaler" for Scaler IP in Exynos5420 + (b) "samsung,exynos5433-scaler" for Scaler IP in Exynos5433 + + - reg : Physical base address of the IP registers and length of memory + mapped region. + + - interrupts : Interrupt specifier for scaler interrupt, according to format + specific to interrupt parent. + + - clocks : Clock specifier for scaler clock, according to generic clock + bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt) + + - clock-names : Names of clocks. For exynos scaler, it should be "mscl" + on 5420 and "pclk", "aclk" and "aclk_xiu" on 5433. + +Example: + scaler@12800000 { + compatible = "samsung,exynos5420-scaler"; + reg = <0x12800000 0x1294>; + interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clock CLK_MSCL0>; + clock-names = "mscl"; + }; diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 54f5703b37e8..208bc27be3cc 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -110,6 +110,12 @@ config DRM_EXYNOS_ROTATOR help Choose this option if you want to use Exynos Rotator for DRM. +config DRM_EXYNOS_SCALER + bool "Scaler" + select DRM_EXYNOS_IPP + help + Choose this option if you want to use Exynos Scaler for DRM. 
+ config DRM_EXYNOS_GSC bool "GScaler" depends on VIDEO_SAMSUNG_EXYNOS_GSC=n diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index bdf4212dde7b..3b323f1e0475 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile @@ -21,6 +21,7 @@ exynosdrm-$(CONFIG_DRM_EXYNOS_G2D) += exynos_drm_g2d.o exynosdrm-$(CONFIG_DRM_EXYNOS_IPP) += exynos_drm_ipp.o exynosdrm-$(CONFIG_DRM_EXYNOS_FIMC) += exynos_drm_fimc.o exynosdrm-$(CONFIG_DRM_EXYNOS_ROTATOR) += exynos_drm_rotator.o +exynosdrm-$(CONFIG_DRM_EXYNOS_SCALER) += exynos_drm_scaler.o exynosdrm-$(CONFIG_DRM_EXYNOS_GSC) += exynos_drm_gsc.o exynosdrm-$(CONFIG_DRM_EXYNOS_MIC) += exynos_drm_mic.o diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 7ba13c122d14..f55ce44b0c0d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -266,6 +266,9 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = { }, { DRV_PTR(rotator_driver, CONFIG_DRM_EXYNOS_ROTATOR), DRM_COMPONENT_DRIVER + }, { + DRV_PTR(scaler_driver, CONFIG_DRM_EXYNOS_SCALER), + DRM_COMPONENT_DRIVER }, { DRV_PTR(gsc_driver, CONFIG_DRM_EXYNOS_GSC), DRM_COMPONENT_DRIVER diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index 0834e7e28c99..c07e6f380e60 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h @@ -298,6 +298,7 @@ extern struct platform_driver vidi_driver; extern struct platform_driver g2d_driver; extern struct platform_driver fimc_driver; extern struct platform_driver rotator_driver; +extern struct platform_driver scaler_driver; extern struct platform_driver gsc_driver; extern struct platform_driver mic_driver; #endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c new file mode 100644 index 000000000000..63b05b7c846a --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c @@ -0,0 +1,694 @@ +/* + * Copyright (C) 2017 Samsung Electronics Co.Ltd + * Author: + * Andrzej Pietrasiewicz + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundationr + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "regs-scaler.h" +#include "exynos_drm_fb.h" +#include "exynos_drm_drv.h" +#include "exynos_drm_iommu.h" +#include "exynos_drm_ipp.h" + +#define scaler_read(offset) readl(scaler->regs + (offset)) +#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset)) +#define SCALER_MAX_CLK 4 +#define SCALER_AUTOSUSPEND_DELAY 2000 + +struct scaler_data { + const char *clk_name[SCALER_MAX_CLK]; + unsigned int num_clk; + const struct exynos_drm_ipp_formats *formats; + unsigned int num_formats; +}; + +struct scaler_context { + struct exynos_drm_ipp ipp; + struct drm_device *drm_dev; + struct device *dev; + void __iomem *regs; + struct clk *clock[SCALER_MAX_CLK]; + struct exynos_drm_ipp_task *task; + const struct scaler_data *scaler_data; +}; + +static u32 scaler_get_format(u32 drm_fmt) +{ + switch (drm_fmt) { + case DRM_FORMAT_NV21: + return SCALER_YUV420_2P_UV; + case DRM_FORMAT_NV12: + return SCALER_YUV420_2P_VU; + case DRM_FORMAT_YUV420: + return SCALER_YUV420_3P; + case DRM_FORMAT_YUYV: + return SCALER_YUV422_1P_YUYV; + case DRM_FORMAT_UYVY: + return SCALER_YUV422_1P_UYVY; + case DRM_FORMAT_YVYU: 
+ return SCALER_YUV422_1P_YVYU; + case DRM_FORMAT_NV61: + return SCALER_YUV422_2P_UV; + case DRM_FORMAT_NV16: + return SCALER_YUV422_2P_VU; + case DRM_FORMAT_YUV422: + return SCALER_YUV422_3P; + case DRM_FORMAT_NV42: + return SCALER_YUV444_2P_UV; + case DRM_FORMAT_NV24: + return SCALER_YUV444_2P_VU; + case DRM_FORMAT_YUV444: + return SCALER_YUV444_3P; + case DRM_FORMAT_RGB565: + return SCALER_RGB_565; + case DRM_FORMAT_XRGB1555: + return SCALER_ARGB1555; + case DRM_FORMAT_ARGB1555: + return SCALER_ARGB1555; + case DRM_FORMAT_XRGB4444: + return SCALER_ARGB4444; + case DRM_FORMAT_ARGB4444: + return SCALER_ARGB4444; + case DRM_FORMAT_XRGB8888: + return SCALER_ARGB8888; + case DRM_FORMAT_ARGB8888: + return SCALER_ARGB8888; + case DRM_FORMAT_RGBX8888: + return SCALER_RGBA8888; + case DRM_FORMAT_RGBA8888: + return SCALER_RGBA8888; + default: + break; + } + + return 0; +} + +static inline void scaler_enable_int(struct scaler_context *scaler) +{ + u32 val; + + val = SCALER_INT_EN_TIMEOUT | + SCALER_INT_EN_ILLEGAL_BLEND | + SCALER_INT_EN_ILLEGAL_RATIO | + SCALER_INT_EN_ILLEGAL_DST_HEIGHT | + SCALER_INT_EN_ILLEGAL_DST_WIDTH | + SCALER_INT_EN_ILLEGAL_DST_V_POS | + SCALER_INT_EN_ILLEGAL_DST_H_POS | + SCALER_INT_EN_ILLEGAL_DST_C_SPAN | + SCALER_INT_EN_ILLEGAL_DST_Y_SPAN | + SCALER_INT_EN_ILLEGAL_DST_CR_BASE | + SCALER_INT_EN_ILLEGAL_DST_CB_BASE | + SCALER_INT_EN_ILLEGAL_DST_Y_BASE | + SCALER_INT_EN_ILLEGAL_DST_COLOR | + SCALER_INT_EN_ILLEGAL_SRC_HEIGHT | + SCALER_INT_EN_ILLEGAL_SRC_WIDTH | + SCALER_INT_EN_ILLEGAL_SRC_CV_POS | + SCALER_INT_EN_ILLEGAL_SRC_CH_POS | + SCALER_INT_EN_ILLEGAL_SRC_YV_POS | + SCALER_INT_EN_ILLEGAL_SRC_YH_POS | + SCALER_INT_EN_ILLEGAL_DST_SPAN | + SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN | + SCALER_INT_EN_ILLEGAL_SRC_CR_BASE | + SCALER_INT_EN_ILLEGAL_SRC_CB_BASE | + SCALER_INT_EN_ILLEGAL_SRC_Y_BASE | + SCALER_INT_EN_ILLEGAL_SRC_COLOR | + SCALER_INT_EN_FRAME_END; + scaler_write(val, SCALER_INT_EN); +} + +static inline void scaler_set_src_fmt(struct scaler_context *scaler, + u32 src_fmt) +{ + u32 val; + + val = SCALER_SRC_CFG_SET_COLOR_FORMAT(src_fmt); + scaler_write(val, SCALER_SRC_CFG); +} + +static inline void scaler_set_src_base(struct scaler_context *scaler, + struct exynos_drm_ipp_buffer *src_buf) +{ + static unsigned int bases[] = { + SCALER_SRC_Y_BASE, + SCALER_SRC_CB_BASE, + SCALER_SRC_CR_BASE, + }; + int i; + + for (i = 0; i < src_buf->format->num_planes; ++i) + scaler_write(src_buf->dma_addr[i], bases[i]); +} + +static inline void scaler_set_src_span(struct scaler_context *scaler, + struct exynos_drm_ipp_buffer *src_buf) +{ + u32 val; + + val = SCALER_SRC_SPAN_SET_Y_SPAN(src_buf->buf.pitch[0] / + src_buf->format->cpp[0]); + + if (src_buf->format->num_planes > 1) + val |= SCALER_SRC_SPAN_SET_C_SPAN(src_buf->buf.pitch[1]); + + scaler_write(val, SCALER_SRC_SPAN); +} + +static inline void scaler_set_src_luma_pos(struct scaler_context *scaler, + struct drm_exynos_ipp_task_rect *src_pos) +{ + u32 val; + + val = SCALER_SRC_Y_POS_SET_YH_POS(src_pos->x << 2); + val |= SCALER_SRC_Y_POS_SET_YV_POS(src_pos->y << 2); + scaler_write(val, SCALER_SRC_Y_POS); + scaler_write(val, SCALER_SRC_C_POS); /* ATTENTION! 
*/ +} + +static inline void scaler_set_src_wh(struct scaler_context *scaler, + struct drm_exynos_ipp_task_rect *src_pos) +{ + u32 val; + + val = SCALER_SRC_WH_SET_WIDTH(src_pos->w); + val |= SCALER_SRC_WH_SET_HEIGHT(src_pos->h); + scaler_write(val, SCALER_SRC_WH); +} + +static inline void scaler_set_dst_fmt(struct scaler_context *scaler, + u32 dst_fmt) +{ + u32 val; + + val = SCALER_DST_CFG_SET_COLOR_FORMAT(dst_fmt); + scaler_write(val, SCALER_DST_CFG); +} + +static inline void scaler_set_dst_base(struct scaler_context *scaler, + struct exynos_drm_ipp_buffer *dst_buf) +{ + static unsigned int bases[] = { + SCALER_DST_Y_BASE, + SCALER_DST_CB_BASE, + SCALER_DST_CR_BASE, + }; + int i; + + for (i = 0; i < dst_buf->format->num_planes; ++i) + scaler_write(dst_buf->dma_addr[i], bases[i]); +} + +static inline void scaler_set_dst_span(struct scaler_context *scaler, + struct exynos_drm_ipp_buffer *dst_buf) +{ + u32 val; + + val = SCALER_DST_SPAN_SET_Y_SPAN(dst_buf->buf.pitch[0] / + dst_buf->format->cpp[0]); + + if (dst_buf->format->num_planes > 1) + val |= SCALER_DST_SPAN_SET_C_SPAN(dst_buf->buf.pitch[1]); + + scaler_write(val, SCALER_DST_SPAN); +} + +static inline void scaler_set_dst_luma_pos(struct scaler_context *scaler, + struct drm_exynos_ipp_task_rect *dst_pos) +{ + u32 val; + + val = SCALER_DST_WH_SET_WIDTH(dst_pos->w); + val |= SCALER_DST_WH_SET_HEIGHT(dst_pos->h); + scaler_write(val, SCALER_DST_WH); +} + +static inline void scaler_set_dst_wh(struct scaler_context *scaler, + struct drm_exynos_ipp_task_rect *dst_pos) +{ + u32 val; + + val = SCALER_DST_POS_SET_H_POS(dst_pos->x); + val |= SCALER_DST_POS_SET_V_POS(dst_pos->y); + scaler_write(val, SCALER_DST_POS); +} + +static inline void scaler_set_hv_ratio(struct scaler_context *scaler, + unsigned int rotation, + struct drm_exynos_ipp_task_rect *src_pos, + struct drm_exynos_ipp_task_rect *dst_pos) +{ + u32 val, h_ratio, v_ratio; + + if (drm_rotation_90_or_270(rotation)) { + h_ratio = (src_pos->h << 16) / dst_pos->w; + v_ratio = (src_pos->w << 16) / dst_pos->h; + } else { + h_ratio = (src_pos->w << 16) / dst_pos->w; + v_ratio = (src_pos->h << 16) / dst_pos->h; + } + + val = SCALER_H_RATIO_SET(h_ratio); + scaler_write(val, SCALER_H_RATIO); + + val = SCALER_V_RATIO_SET(v_ratio); + scaler_write(val, SCALER_V_RATIO); +} + +static inline void scaler_set_rotation(struct scaler_context *scaler, + unsigned int rotation) +{ + u32 val = 0; + + if (rotation & DRM_MODE_ROTATE_90) + val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_90); + else if (rotation & DRM_MODE_ROTATE_180) + val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_180); + else if (rotation & DRM_MODE_ROTATE_270) + val |= SCALER_ROT_CFG_SET_ROTMODE(SCALER_ROT_MODE_270); + if (rotation & DRM_MODE_REFLECT_X) + val |= SCALER_ROT_CFG_FLIP_X_EN; + if (rotation & DRM_MODE_REFLECT_Y) + val |= SCALER_ROT_CFG_FLIP_Y_EN; + scaler_write(val, SCALER_ROT_CFG); +} + +static inline void scaler_set_csc(struct scaler_context *scaler, + const struct drm_format_info *fmt) +{ + static const u32 csc_mtx[2][3][3] = { + { /* YCbCr to RGB */ + {0x254, 0x000, 0x331}, + {0x254, 0xf38, 0xe60}, + {0x254, 0x409, 0x000}, + }, + { /* RGB to YCbCr */ + {0x084, 0x102, 0x032}, + {0xfb4, 0xf6b, 0x0e1}, + {0x0e1, 0xf44, 0xfdc}, + }, + }; + int i, j, dir; + + switch (fmt->format) { + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA8888: + 
dir = 1; + break; + default: + dir = 0; + } + + for (i = 0; i < 3; i++) + for (j = 0; j < 3; j++) + scaler_write(csc_mtx[dir][i][j], SCALER_CSC_COEF(j, i)); +} + +static inline void scaler_set_timer(struct scaler_context *scaler, + unsigned int timer, unsigned int divider) +{ + u32 val; + + val = SCALER_TIMEOUT_CTRL_TIMER_ENABLE; + val |= SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(timer); + val |= SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(divider); + scaler_write(val, SCALER_TIMEOUT_CTRL); +} + +static inline void scaler_start_hw(struct scaler_context *scaler) +{ + scaler_write(SCALER_CFG_START_CMD, SCALER_CFG); +} + +static int scaler_commit(struct exynos_drm_ipp *ipp, + struct exynos_drm_ipp_task *task) +{ + struct scaler_context *scaler = + container_of(ipp, struct scaler_context, ipp); + + u32 src_fmt = scaler_get_format(task->src.buf.fourcc); + struct drm_exynos_ipp_task_rect *src_pos = &task->src.rect; + + u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc); + struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect; + + scaler->task = task; + + pm_runtime_get_sync(scaler->dev); + + scaler_set_src_fmt(scaler, src_fmt); + scaler_set_src_base(scaler, &task->src); + scaler_set_src_span(scaler, &task->src); + scaler_set_src_luma_pos(scaler, src_pos); + scaler_set_src_wh(scaler, src_pos); + + scaler_set_dst_fmt(scaler, dst_fmt); + scaler_set_dst_base(scaler, &task->dst); + scaler_set_dst_span(scaler, &task->dst); + scaler_set_dst_luma_pos(scaler, dst_pos); + scaler_set_dst_wh(scaler, dst_pos); + + scaler_set_hv_ratio(scaler, task->transform.rotation, src_pos, dst_pos); + scaler_set_rotation(scaler, task->transform.rotation); + + scaler_set_csc(scaler, task->src.format); + + scaler_set_timer(scaler, 0xffff, 0xf); + + scaler_enable_int(scaler); + scaler_start_hw(scaler); + + return 0; +} + +static struct exynos_drm_ipp_funcs ipp_funcs = { + .commit = scaler_commit, +}; + +static inline void scaler_disable_int(struct scaler_context *scaler) +{ + scaler_write(0, SCALER_INT_EN); +} + +static inline u32 scaler_get_int_status(struct scaler_context *scaler) +{ + return scaler_read(SCALER_INT_STATUS); +} + +static inline bool scaler_task_done(u32 val) +{ + return val & SCALER_INT_STATUS_FRAME_END ? 
0 : -EINVAL; +} + +static irqreturn_t scaler_irq_handler(int irq, void *arg) +{ + struct scaler_context *scaler = arg; + + u32 val = scaler_get_int_status(scaler); + + scaler_disable_int(scaler); + + if (scaler->task) { + struct exynos_drm_ipp_task *task = scaler->task; + + scaler->task = NULL; + pm_runtime_mark_last_busy(scaler->dev); + pm_runtime_put_autosuspend(scaler->dev); + exynos_drm_ipp_task_done(task, scaler_task_done(val)); + } + + return IRQ_HANDLED; +} + +static int scaler_bind(struct device *dev, struct device *master, void *data) +{ + struct scaler_context *scaler = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_ipp *ipp = &scaler->ipp; + + scaler->drm_dev = drm_dev; + drm_iommu_attach_device(drm_dev, dev); + + exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs, + DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE | + DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT, + scaler->scaler_data->formats, + scaler->scaler_data->num_formats, "scaler"); + + dev_info(dev, "The exynos scaler has been probed successfully\n"); + + return 0; +} + +static void scaler_unbind(struct device *dev, struct device *master, + void *data) +{ + struct scaler_context *scaler = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_ipp *ipp = &scaler->ipp; + + exynos_drm_ipp_unregister(drm_dev, ipp); + drm_iommu_detach_device(scaler->drm_dev, scaler->dev); +} + +static const struct component_ops scaler_component_ops = { + .bind = scaler_bind, + .unbind = scaler_unbind, +}; + +static int scaler_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *regs_res; + struct scaler_context *scaler; + int irq; + int ret, i; + + scaler = devm_kzalloc(dev, sizeof(*scaler), GFP_KERNEL); + if (!scaler) + return -ENOMEM; + + scaler->scaler_data = + (struct scaler_data *)of_device_get_match_data(dev); + + scaler->dev = dev; + regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + scaler->regs = devm_ioremap_resource(dev, regs_res); + if (IS_ERR(scaler->regs)) + return PTR_ERR(scaler->regs); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "failed to get irq\n"); + return irq; + } + + ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler, + IRQF_ONESHOT, "drm_scaler", scaler); + if (ret < 0) { + dev_err(dev, "failed to request irq\n"); + return ret; + } + + for (i = 0; i < scaler->scaler_data->num_clk; ++i) { + scaler->clock[i] = devm_clk_get(dev, + scaler->scaler_data->clk_name[i]); + if (IS_ERR(scaler->clock[i])) { + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(scaler->clock[i]); + } + } + + pm_runtime_use_autosuspend(dev); + pm_runtime_set_autosuspend_delay(dev, SCALER_AUTOSUSPEND_DELAY); + pm_runtime_enable(dev); + platform_set_drvdata(pdev, scaler); + + ret = component_add(dev, &scaler_component_ops); + if (ret) + goto err_ippdrv_register; + + return 0; + +err_ippdrv_register: + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); + return ret; +} + +static int scaler_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + + component_del(dev, &scaler_component_ops); + pm_runtime_dont_use_autosuspend(dev); + pm_runtime_disable(dev); + + return 0; +} + +#ifdef CONFIG_PM + +static int clk_disable_unprepare_wrapper(struct clk *clk) +{ + clk_disable_unprepare(clk); + + return 0; +} + +static int scaler_clk_ctrl(struct scaler_context *scaler, bool enable) +{ + int (*clk_fun)(struct clk *clk), i; + + clk_fun = enable ? 
clk_prepare_enable : clk_disable_unprepare_wrapper; + + for (i = 0; i < scaler->scaler_data->num_clk; ++i) + clk_fun(scaler->clock[i]); + + return 0; +} + +static int scaler_runtime_suspend(struct device *dev) +{ + struct scaler_context *scaler = dev_get_drvdata(dev); + + return scaler_clk_ctrl(scaler, false); +} + +static int scaler_runtime_resume(struct device *dev) +{ + struct scaler_context *scaler = dev_get_drvdata(dev); + + return scaler_clk_ctrl(scaler, true); +} +#endif + +static const struct dev_pm_ops scaler_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_RUNTIME_PM_OPS(scaler_runtime_suspend, scaler_runtime_resume, NULL) +}; + +static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, + { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 2) }, + { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, + .v = { 65536 * 1 / 4, 65536 * 16 }) }, +}; + +static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_h_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, + { IPP_SIZE_LIMIT(AREA, .h.align = 2, .v.align = 1) }, + { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, + .v = { 65536 * 1 / 4, 65536 * 16 }) }, +}; + +static const struct drm_exynos_ipp_limit scaler_5420_one_pixel_limits[] = { + { IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) }, + { IPP_SCALE_LIMIT(.h = { 65536 * 1 / 4, 65536 * 16 }, + .v = { 65536 * 1 / 4, 65536 * 16 }) }, +}; + +static const struct exynos_drm_ipp_formats exynos5420_formats[] = { + /* SCALER_YUV420_2P_UV */ + { IPP_SRCDST_FORMAT(NV21, scaler_5420_two_pixel_hv_limits) }, + + /* SCALER_YUV420_2P_VU */ + { IPP_SRCDST_FORMAT(NV12, scaler_5420_two_pixel_hv_limits) }, + + /* SCALER_YUV420_3P */ + { IPP_SRCDST_FORMAT(YUV420, scaler_5420_two_pixel_hv_limits) }, + + /* SCALER_YUV422_1P_YUYV */ + { IPP_SRCDST_FORMAT(YUYV, scaler_5420_two_pixel_h_limits) }, + + /* SCALER_YUV422_1P_UYVY */ + { IPP_SRCDST_FORMAT(UYVY, scaler_5420_two_pixel_h_limits) }, + + /* SCALER_YUV422_1P_YVYU */ + { IPP_SRCDST_FORMAT(YVYU, scaler_5420_two_pixel_h_limits) }, + + /* SCALER_YUV422_2P_UV */ + { IPP_SRCDST_FORMAT(NV61, scaler_5420_two_pixel_h_limits) }, + + /* SCALER_YUV422_2P_VU */ + { IPP_SRCDST_FORMAT(NV16, scaler_5420_two_pixel_h_limits) }, + + /* SCALER_YUV422_3P */ + { IPP_SRCDST_FORMAT(YUV422, scaler_5420_two_pixel_h_limits) }, + + /* SCALER_YUV444_2P_UV */ + { IPP_SRCDST_FORMAT(NV42, scaler_5420_one_pixel_limits) }, + + /* SCALER_YUV444_2P_VU */ + { IPP_SRCDST_FORMAT(NV24, scaler_5420_one_pixel_limits) }, + + /* SCALER_YUV444_3P */ + { IPP_SRCDST_FORMAT(YUV444, scaler_5420_one_pixel_limits) }, + + /* SCALER_RGB_565 */ + { IPP_SRCDST_FORMAT(RGB565, scaler_5420_one_pixel_limits) }, + + /* SCALER_ARGB1555 */ + { IPP_SRCDST_FORMAT(XRGB1555, scaler_5420_one_pixel_limits) }, + + /* SCALER_ARGB1555 */ + { IPP_SRCDST_FORMAT(ARGB1555, scaler_5420_one_pixel_limits) }, + + /* SCALER_ARGB4444 */ + { IPP_SRCDST_FORMAT(XRGB4444, scaler_5420_one_pixel_limits) }, + + /* SCALER_ARGB4444 */ + { IPP_SRCDST_FORMAT(ARGB4444, scaler_5420_one_pixel_limits) }, + + /* SCALER_ARGB8888 */ + { IPP_SRCDST_FORMAT(XRGB8888, scaler_5420_one_pixel_limits) }, + + /* SCALER_ARGB8888 */ + { IPP_SRCDST_FORMAT(ARGB8888, scaler_5420_one_pixel_limits) }, + + /* SCALER_RGBA8888 */ + { IPP_SRCDST_FORMAT(RGBX8888, scaler_5420_one_pixel_limits) }, + + /* SCALER_RGBA8888 */ + { IPP_SRCDST_FORMAT(RGBA8888, 
scaler_5420_one_pixel_limits) }, +}; + +static const struct scaler_data exynos5420_data = { + .clk_name = {"mscl"}, + .num_clk = 1, + .formats = exynos5420_formats, + .num_formats = ARRAY_SIZE(exynos5420_formats), +}; + +static const struct scaler_data exynos5433_data = { + .clk_name = {"pclk", "aclk", "aclk_xiu"}, + .num_clk = 3, + .formats = exynos5420_formats, /* intentional */ + .num_formats = ARRAY_SIZE(exynos5420_formats), +}; + +static const struct of_device_id exynos_scaler_match[] = { + { + .compatible = "samsung,exynos5420-scaler", + .data = &exynos5420_data, + }, { + .compatible = "samsung,exynos5433-scaler", + .data = &exynos5433_data, + }, { + }, +}; +MODULE_DEVICE_TABLE(of, exynos_scaler_match); + +struct platform_driver scaler_driver = { + .probe = scaler_probe, + .remove = scaler_remove, + .driver = { + .name = "exynos-scaler", + .owner = THIS_MODULE, + .pm = &scaler_pm_ops, + .of_match_table = exynos_scaler_match, + }, +}; diff --git a/drivers/gpu/drm/exynos/regs-scaler.h b/drivers/gpu/drm/exynos/regs-scaler.h new file mode 100644 index 000000000000..fc7ccad75e74 --- /dev/null +++ b/drivers/gpu/drm/exynos/regs-scaler.h @@ -0,0 +1,426 @@ +/* drivers/gpu/drm/exynos/regs-scaler.h + * + * Copyright (c) 2017 Samsung Electronics Co., Ltd. + * http://www.samsung.com/ + * Author: Andrzej Pietrasiewicz + * + * Register definition file for Samsung scaler driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef EXYNOS_REGS_SCALER_H +#define EXYNOS_REGS_SCALER_H + +/* Register part */ + +/* Global setting */ +#define SCALER_STATUS 0x0 /* no shadow */ +#define SCALER_CFG 0x4 + +/* Interrupt */ +#define SCALER_INT_EN 0x8 /* no shadow */ +#define SCALER_INT_STATUS 0xc /* no shadow */ + +/* SRC */ +#define SCALER_SRC_CFG 0x10 +#define SCALER_SRC_Y_BASE 0x14 +#define SCALER_SRC_CB_BASE 0x18 +#define SCALER_SRC_CR_BASE 0x294 +#define SCALER_SRC_SPAN 0x1c +#define SCALER_SRC_Y_POS 0x20 +#define SCALER_SRC_WH 0x24 +#define SCALER_SRC_C_POS 0x28 + +/* DST */ +#define SCALER_DST_CFG 0x30 +#define SCALER_DST_Y_BASE 0x34 +#define SCALER_DST_CB_BASE 0x38 +#define SCALER_DST_CR_BASE 0x298 +#define SCALER_DST_SPAN 0x3c +#define SCALER_DST_WH 0x40 +#define SCALER_DST_POS 0x44 + +/* Ratio */ +#define SCALER_H_RATIO 0x50 +#define SCALER_V_RATIO 0x54 + +/* Rotation */ +#define SCALER_ROT_CFG 0x58 + +/* Coefficient */ +/* + * YHCOEF_{x}{A|B|C|D} CHCOEF_{x}{A|B|C|D} + * + * A B C D A B C D + * 0 60 64 68 6c 140 144 148 14c + * 1 70 74 78 7c 150 154 158 15c + * 2 80 84 88 8c 160 164 168 16c + * 3 90 94 98 9c 170 174 178 17c + * 4 a0 a4 a8 ac 180 184 188 18c + * 5 b0 b4 b8 bc 190 194 198 19c + * 6 c0 c4 c8 cc 1a0 1a4 1a8 1ac + * 7 d0 d4 d8 dc 1b0 1b4 1b8 1bc + * 8 e0 e4 e8 ec 1c0 1c4 1c8 1cc + * + * + * YVCOEF_{x}{A|B} CVCOEF_{x}{A|B} + * + * A B A B + * 0 f0 f4 1d0 1d4 + * 1 f8 fc 1d8 1dc + * 2 100 104 1e0 1e4 + * 3 108 10c 1e8 1ec + * 4 110 114 1f0 1f4 + * 5 118 11c 1f8 1fc + * 6 120 124 200 204 + * 7 128 12c 208 20c + * 8 130 134 210 214 + */ +#define _SCALER_HCOEF_DELTA(r, c) ((r) * 0x10 + (c) * 0x4) +#define _SCALER_VCOEF_DELTA(r, c) ((r) * 0x8 + (c) * 0x4) + +#define SCALER_YHCOEF(r, c) (0x60 + _SCALER_HCOEF_DELTA((r), (c))) +#define SCALER_YVCOEF(r, c) (0xf0 + _SCALER_VCOEF_DELTA((r), (c))) +#define SCALER_CHCOEF(r, c) (0x140 + _SCALER_HCOEF_DELTA((r), (c))) +#define SCALER_CVCOEF(r, c) (0x1d0 + _SCALER_VCOEF_DELTA((r), (c))) + + +/* Color 
Space Conversion */ +#define SCALER_CSC_COEF(x, y) (0x220 + (y) * 0xc + (x) * 0x4) + +/* Dithering */ +#define SCALER_DITH_CFG 0x250 + +/* Version Number */ +#define SCALER_VER 0x260 /* no shadow */ + +/* Cycle count and Timeout */ +#define SCALER_CYCLE_COUNT 0x278 /* no shadow */ +#define SCALER_TIMEOUT_CTRL 0x2c0 /* no shadow */ +#define SCALER_TIMEOUT_CNT 0x2c4 /* no shadow */ + +/* Blending */ +#define SCALER_SRC_BLEND_COLOR 0x280 +#define SCALER_SRC_BLEND_ALPHA 0x284 +#define SCALER_DST_BLEND_COLOR 0x288 +#define SCALER_DST_BLEND_ALPHA 0x28c + +/* Color Fill */ +#define SCALER_FILL_COLOR 0x290 + +/* Multiple Command Queue */ +#define SCALER_ADDR_Q_CONFIG 0x2a0 /* no shadow */ +#define SCALER_SRC_ADDR_Q_STATUS 0x2a4 /* no shadow */ +#define SCALER_SRC_ADDR_Q 0x2a8 /* no shadow */ + +/* CRC */ +#define SCALER_CRC_COLOR00_10 0x2b0 /* no shadow */ +#define SCALER_CRC_COLOR20_30 0x2b4 /* no shadow */ +#define SCALER_CRC_COLOR01_11 0x2b8 /* no shadow */ +#define SCALER_CRC_COLOR21_31 0x2bc /* no shadow */ + +/* Shadow Registers */ +#define SCALER_SHADOW_OFFSET 0x1000 + + +/* Bit definition part */ +#define SCALER_MASK(hi_b, lo_b) ((1 << ((hi_b) - (lo_b) + 1)) - 1) +#define SCALER_GET(reg, hi_b, lo_b) \ + (((reg) >> (lo_b)) & SCALER_MASK(hi_b, lo_b)) +#define SCALER_SET(val, hi_b, lo_b) \ + (((val) & SCALER_MASK(hi_b, lo_b)) << lo_b) + +/* SCALER_STATUS */ +#define SCALER_STATUS_SCALER_RUNNING (1 << 1) +#define SCALER_STATUS_SCALER_READY_CLK_DOWN (1 << 0) + +/* SCALER_CFG */ +#define SCALER_CFG_FILL_EN (1 << 24) +#define SCALER_CFG_BLEND_COLOR_DIVIDE_ALPHA_EN (1 << 17) +#define SCALER_CFG_BLEND_EN (1 << 16) +#define SCALER_CFG_CSC_Y_OFFSET_SRC_EN (1 << 10) +#define SCALER_CFG_CSC_Y_OFFSET_DST_EN (1 << 9) +#define SCALER_CFG_16_BURST_MODE (1 << 8) +#define SCALER_CFG_SOFT_RESET (1 << 1) +#define SCALER_CFG_START_CMD (1 << 0) + +/* SCALER_INT_EN */ +#define SCALER_INT_EN_TIMEOUT (1 << 31) +#define SCALER_INT_EN_ILLEGAL_BLEND (1 << 24) +#define SCALER_INT_EN_ILLEGAL_RATIO (1 << 23) +#define SCALER_INT_EN_ILLEGAL_DST_HEIGHT (1 << 22) +#define SCALER_INT_EN_ILLEGAL_DST_WIDTH (1 << 21) +#define SCALER_INT_EN_ILLEGAL_DST_V_POS (1 << 20) +#define SCALER_INT_EN_ILLEGAL_DST_H_POS (1 << 19) +#define SCALER_INT_EN_ILLEGAL_DST_C_SPAN (1 << 18) +#define SCALER_INT_EN_ILLEGAL_DST_Y_SPAN (1 << 17) +#define SCALER_INT_EN_ILLEGAL_DST_CR_BASE (1 << 16) +#define SCALER_INT_EN_ILLEGAL_DST_CB_BASE (1 << 15) +#define SCALER_INT_EN_ILLEGAL_DST_Y_BASE (1 << 14) +#define SCALER_INT_EN_ILLEGAL_DST_COLOR (1 << 13) +#define SCALER_INT_EN_ILLEGAL_SRC_HEIGHT (1 << 12) +#define SCALER_INT_EN_ILLEGAL_SRC_WIDTH (1 << 11) +#define SCALER_INT_EN_ILLEGAL_SRC_CV_POS (1 << 10) +#define SCALER_INT_EN_ILLEGAL_SRC_CH_POS (1 << 9) +#define SCALER_INT_EN_ILLEGAL_SRC_YV_POS (1 << 8) +#define SCALER_INT_EN_ILLEGAL_SRC_YH_POS (1 << 7) +#define SCALER_INT_EN_ILLEGAL_DST_SPAN (1 << 6) +#define SCALER_INT_EN_ILLEGAL_SRC_Y_SPAN (1 << 5) +#define SCALER_INT_EN_ILLEGAL_SRC_CR_BASE (1 << 4) +#define SCALER_INT_EN_ILLEGAL_SRC_CB_BASE (1 << 3) +#define SCALER_INT_EN_ILLEGAL_SRC_Y_BASE (1 << 2) +#define SCALER_INT_EN_ILLEGAL_SRC_COLOR (1 << 1) +#define SCALER_INT_EN_FRAME_END (1 << 0) + +/* SCALER_INT_STATUS */ +#define SCALER_INT_STATUS_TIMEOUT (1 << 31) +#define SCALER_INT_STATUS_ILLEGAL_BLEND (1 << 24) +#define SCALER_INT_STATUS_ILLEGAL_RATIO (1 << 23) +#define SCALER_INT_STATUS_ILLEGAL_DST_HEIGHT (1 << 22) +#define SCALER_INT_STATUS_ILLEGAL_DST_WIDTH (1 << 21) +#define SCALER_INT_STATUS_ILLEGAL_DST_V_POS (1 << 20) +#define 
SCALER_INT_STATUS_ILLEGAL_DST_H_POS (1 << 19) +#define SCALER_INT_STATUS_ILLEGAL_DST_C_SPAN (1 << 18) +#define SCALER_INT_STATUS_ILLEGAL_DST_Y_SPAN (1 << 17) +#define SCALER_INT_STATUS_ILLEGAL_DST_CR_BASE (1 << 16) +#define SCALER_INT_STATUS_ILLEGAL_DST_CB_BASE (1 << 15) +#define SCALER_INT_STATUS_ILLEGAL_DST_Y_BASE (1 << 14) +#define SCALER_INT_STATUS_ILLEGAL_DST_COLOR (1 << 13) +#define SCALER_INT_STATUS_ILLEGAL_SRC_HEIGHT (1 << 12) +#define SCALER_INT_STATUS_ILLEGAL_SRC_WIDTH (1 << 11) +#define SCALER_INT_STATUS_ILLEGAL_SRC_CV_POS (1 << 10) +#define SCALER_INT_STATUS_ILLEGAL_SRC_CH_POS (1 << 9) +#define SCALER_INT_STATUS_ILLEGAL_SRC_YV_POS (1 << 8) +#define SCALER_INT_STATUS_ILLEGAL_SRC_YH_POS (1 << 7) +#define SCALER_INT_STATUS_ILLEGAL_DST_SPAN (1 << 6) +#define SCALER_INT_STATUS_ILLEGAL_SRC_Y_SPAN (1 << 5) +#define SCALER_INT_STATUS_ILLEGAL_SRC_CR_BASE (1 << 4) +#define SCALER_INT_STATUS_ILLEGAL_SRC_CB_BASE (1 << 3) +#define SCALER_INT_STATUS_ILLEGAL_SRC_Y_BASE (1 << 2) +#define SCALER_INT_STATUS_ILLEGAL_SRC_COLOR (1 << 1) +#define SCALER_INT_STATUS_FRAME_END (1 << 0) + +/* SCALER_SRC_CFG */ +#define SCALER_SRC_CFG_TILE_EN (1 << 10) +#define SCALER_SRC_CFG_GET_BYTE_SWAP(r) SCALER_GET(r, 6, 5) +#define SCALER_SRC_CFG_SET_BYTE_SWAP(v) SCALER_SET(v, 6, 5) +#define SCALER_SRC_CFG_GET_COLOR_FORMAT(r) SCALER_GET(r, 4, 0) +#define SCALER_SRC_CFG_SET_COLOR_FORMAT(v) SCALER_SET(v, 4, 0) +#define SCALER_YUV420_2P_UV 0 +#define SCALER_YUV422_2P_UV 2 +#define SCALER_YUV444_2P_UV 3 +#define SCALER_RGB_565 4 +#define SCALER_ARGB1555 5 +#define SCALER_ARGB8888 6 +#define SCALER_ARGB8888_PRE 7 +#define SCALER_YUV422_1P_YVYU 9 +#define SCALER_YUV422_1P_YUYV 10 +#define SCALER_YUV422_1P_UYVY 11 +#define SCALER_ARGB4444 12 +#define SCALER_L8A8 13 +#define SCALER_RGBA8888 14 +#define SCALER_L8 15 +#define SCALER_YUV420_2P_VU 16 +#define SCALER_YUV422_2P_VU 18 +#define SCALER_YUV444_2P_VU 19 +#define SCALER_YUV420_3P 20 +#define SCALER_YUV422_3P 22 +#define SCALER_YUV444_3P 23 + +/* SCALER_SRC_SPAN */ +#define SCALER_SRC_SPAN_GET_C_SPAN(r) SCALER_GET(r, 29, 16) +#define SCALER_SRC_SPAN_SET_C_SPAN(v) SCALER_SET(v, 29, 16) +#define SCALER_SRC_SPAN_GET_Y_SPAN(r) SCALER_GET(r, 13, 0) +#define SCALER_SRC_SPAN_SET_Y_SPAN(v) SCALER_SET(v, 13, 0) + +/* SCALER_SRC_Y_POS */ +#define SCALER_SRC_Y_POS_GET_YH_POS(r) SCALER_GET(r, 31, 16) +#define SCALER_SRC_Y_POS_SET_YH_POS(v) SCALER_SET(v, 31, 16) +#define SCALER_SRC_Y_POS_GET_YV_POS(r) SCALER_GET(r, 15, 0) +#define SCALER_SRC_Y_POS_SET_YV_POS(v) SCALER_SET(v, 15, 0) + +/* SCALER_SRC_WH */ +#define SCALER_SRC_WH_GET_WIDTH(r) SCALER_GET(r, 29, 16) +#define SCALER_SRC_WH_SET_WIDTH(v) SCALER_SET(v, 29, 16) +#define SCALER_SRC_WH_GET_HEIGHT(r) SCALER_GET(r, 13, 0) +#define SCALER_SRC_WH_SET_HEIGHT(v) SCALER_SET(v, 13, 0) + +/* SCALER_SRC_C_POS */ +#define SCALER_SRC_C_POS_GET_CH_POS(r) SCALER_GET(r, 31, 16) +#define SCALER_SRC_C_POS_SET_CH_POS(v) SCALER_SET(v, 31, 16) +#define SCALER_SRC_C_POS_GET_CV_POS(r) SCALER_GET(r, 15, 0) +#define SCALER_SRC_C_POS_SET_CV_POS(v) SCALER_SET(v, 15, 0) + +/* SCALER_DST_CFG */ +#define SCALER_DST_CFG_GET_BYTE_SWAP(r) SCALER_GET(r, 6, 5) +#define SCALER_DST_CFG_SET_BYTE_SWAP(v) SCALER_SET(v, 6, 5) +#define SCALER_DST_CFG_GET_COLOR_FORMAT(r) SCALER_GET(r, 4, 0) +#define SCALER_DST_CFG_SET_COLOR_FORMAT(v) SCALER_SET(v, 4, 0) + +/* SCALER_DST_SPAN */ +#define SCALER_DST_SPAN_GET_C_SPAN(r) SCALER_GET(r, 29, 16) +#define SCALER_DST_SPAN_SET_C_SPAN(v) SCALER_SET(v, 29, 16) +#define SCALER_DST_SPAN_GET_Y_SPAN(r) SCALER_GET(r, 13, 0) +#define 
SCALER_DST_SPAN_SET_Y_SPAN(v) SCALER_SET(v, 13, 0) + +/* SCALER_DST_WH */ +#define SCALER_DST_WH_GET_WIDTH(r) SCALER_GET(r, 29, 16) +#define SCALER_DST_WH_SET_WIDTH(v) SCALER_SET(v, 29, 16) +#define SCALER_DST_WH_GET_HEIGHT(r) SCALER_GET(r, 13, 0) +#define SCALER_DST_WH_SET_HEIGHT(v) SCALER_SET(v, 13, 0) + +/* SCALER_DST_POS */ +#define SCALER_DST_POS_GET_H_POS(r) SCALER_GET(r, 29, 16) +#define SCALER_DST_POS_SET_H_POS(v) SCALER_SET(v, 29, 16) +#define SCALER_DST_POS_GET_V_POS(r) SCALER_GET(r, 13, 0) +#define SCALER_DST_POS_SET_V_POS(v) SCALER_SET(v, 13, 0) + +/* SCALER_H_RATIO */ +#define SCALER_H_RATIO_GET(r) SCALER_GET(r, 18, 0) +#define SCALER_H_RATIO_SET(v) SCALER_SET(v, 18, 0) + +/* SCALER_V_RATIO */ +#define SCALER_V_RATIO_GET(r) SCALER_GET(r, 18, 0) +#define SCALER_V_RATIO_SET(v) SCALER_SET(v, 18, 0) + +/* SCALER_ROT_CFG */ +#define SCALER_ROT_CFG_FLIP_X_EN (1 << 3) +#define SCALER_ROT_CFG_FLIP_Y_EN (1 << 2) +#define SCALER_ROT_CFG_GET_ROTMODE(r) SCALER_GET(r, 1, 0) +#define SCALER_ROT_CFG_SET_ROTMODE(v) SCALER_SET(v, 1, 0) +#define SCALER_ROT_MODE_90 1 +#define SCALER_ROT_MODE_180 2 +#define SCALER_ROT_MODE_270 3 + +/* SCALER_HCOEF, SCALER_VCOEF */ +#define SCALER_COEF_SHIFT(i) (16 * (1 - (i) % 2)) +#define SCALER_COEF_GET(r, i) \ + (((r) >> SCALER_COEF_SHIFT(i)) & 0x1ff) +#define SCALER_COEF_SET(v, i) \ + (((v) & 0x1ff) << SCALER_COEF_SHIFT(i)) + +/* SCALER_CSC_COEFxy */ +#define SCALER_CSC_COEF_GET(r) SCALER_GET(r, 11, 0) +#define SCALER_CSC_COEF_SET(v) SCALER_SET(v, 11, 0) + +/* SCALER_DITH_CFG */ +#define SCALER_DITH_CFG_GET_R_TYPE(r) SCALER_GET(r, 8, 6) +#define SCALER_DITH_CFG_SET_R_TYPE(v) SCALER_SET(v, 8, 6) +#define SCALER_DITH_CFG_GET_G_TYPE(r) SCALER_GET(r, 5, 3) +#define SCALER_DITH_CFG_SET_G_TYPE(v) SCALER_SET(v, 5, 3) +#define SCALER_DITH_CFG_GET_B_TYPE(r) SCALER_GET(r, 2, 0) +#define SCALER_DITH_CFG_SET_B_TYPE(v) SCALER_SET(v, 2, 0) + +/* SCALER_TIMEOUT_CTRL */ +#define SCALER_TIMEOUT_CTRL_GET_TIMER_VALUE(r) SCALER_GET(r, 31, 16) +#define SCALER_TIMEOUT_CTRL_SET_TIMER_VALUE(v) SCALER_SET(v, 31, 16) +#define SCALER_TIMEOUT_CTRL_GET_TIMER_DIV(r) SCALER_GET(r, 7, 4) +#define SCALER_TIMEOUT_CTRL_SET_TIMER_DIV(v) SCALER_SET(v, 7, 4) +#define SCALER_TIMEOUT_CTRL_TIMER_ENABLE (1 << 0) + +/* SCALER_TIMEOUT_CNT */ +#define SCALER_TIMEOUT_CTRL_GET_TIMER_COUNT(r) SCALER_GET(r, 31, 16) + +/* SCALER_SRC_BLEND_COLOR */ +#define SCALER_SRC_BLEND_COLOR_SEL_INV (1 << 31) +#define SCALER_SRC_BLEND_COLOR_GET_SEL(r) SCALER_GET(r, 30, 29) +#define SCALER_SRC_BLEND_COLOR_SET_SEL(v) SCALER_SET(v, 30, 29) +#define SCALER_SRC_BLEND_COLOR_OP_SEL_INV (1 << 28) +#define SCALER_SRC_BLEND_COLOR_GET_OP_SEL(r) SCALER_GET(r, 27, 24) +#define SCALER_SRC_BLEND_COLOR_SET_OP_SEL(v) SCALER_SET(v, 27, 24) +#define SCALER_SRC_BLEND_COLOR_GET_COLOR0(r) SCALER_GET(r, 23, 16) +#define SCALER_SRC_BLEND_COLOR_SET_COLOR0(v) SCALER_SET(v, 23, 16) +#define SCALER_SRC_BLEND_COLOR_GET_COLOR1(r) SCALER_GET(r, 15, 8) +#define SCALER_SRC_BLEND_COLOR_SET_COLOR1(v) SCALER_SET(v, 15, 8) +#define SCALER_SRC_BLEND_COLOR_GET_COLOR2(r) SCALER_GET(r, 7, 0) +#define SCALER_SRC_BLEND_COLOR_SET_COLOR2(v) SCALER_SET(v, 7, 0) + +/* SCALER_SRC_BLEND_ALPHA */ +#define SCALER_SRC_BLEND_ALPHA_SEL_INV (1 << 31) +#define SCALER_SRC_BLEND_ALPHA_GET_SEL(r) SCALER_GET(r, 30, 29) +#define SCALER_SRC_BLEND_ALPHA_SET_SEL(v) SCALER_SET(v, 30, 29) +#define SCALER_SRC_BLEND_ALPHA_OP_SEL_INV (1 << 28) +#define SCALER_SRC_BLEND_ALPHA_GET_OP_SEL(r) SCALER_GET(r, 27, 24) +#define SCALER_SRC_BLEND_ALPHA_SET_OP_SEL(v) SCALER_SET(v, 27, 24) +#define 
SCALER_SRC_BLEND_ALPHA_GET_ALPHA(r) SCALER_GET(r, 7, 0) +#define SCALER_SRC_BLEND_ALPHA_SET_ALPHA(v) SCALER_SET(v, 7, 0) + +/* SCALER_DST_BLEND_COLOR */ +#define SCALER_DST_BLEND_COLOR_SEL_INV (1 << 31) +#define SCALER_DST_BLEND_COLOR_GET_SEL(r) SCALER_GET(r, 30, 29) +#define SCALER_DST_BLEND_COLOR_SET_SEL(v) SCALER_SET(v, 30, 29) +#define SCALER_DST_BLEND_COLOR_OP_SEL_INV (1 << 28) +#define SCALER_DST_BLEND_COLOR_GET_OP_SEL(r) SCALER_GET(r, 27, 24) +#define SCALER_DST_BLEND_COLOR_SET_OP_SEL(v) SCALER_SET(v, 27, 24) +#define SCALER_DST_BLEND_COLOR_GET_COLOR0(r) SCALER_GET(r, 23, 16) +#define SCALER_DST_BLEND_COLOR_SET_COLOR0(v) SCALER_SET(v, 23, 16) +#define SCALER_DST_BLEND_COLOR_GET_COLOR1(r) SCALER_GET(r, 15, 8) +#define SCALER_DST_BLEND_COLOR_SET_COLOR1(v) SCALER_SET(v, 15, 8) +#define SCALER_DST_BLEND_COLOR_GET_COLOR2(r) SCALER_GET(r, 7, 0) +#define SCALER_DST_BLEND_COLOR_SET_COLOR2(v) SCALER_SET(v, 7, 0) + +/* SCALER_DST_BLEND_ALPHA */ +#define SCALER_DST_BLEND_ALPHA_SEL_INV (1 << 31) +#define SCALER_DST_BLEND_ALPHA_GET_SEL(r) SCALER_GET(r, 30, 29) +#define SCALER_DST_BLEND_ALPHA_SET_SEL(v) SCALER_SET(v, 30, 29) +#define SCALER_DST_BLEND_ALPHA_OP_SEL_INV (1 << 28) +#define SCALER_DST_BLEND_ALPHA_GET_OP_SEL(r) SCALER_GET(r, 27, 24) +#define SCALER_DST_BLEND_ALPHA_SET_OP_SEL(v) SCALER_SET(v, 27, 24) +#define SCALER_DST_BLEND_ALPHA_GET_ALPHA(r) SCALER_GET(r, 7, 0) +#define SCALER_DST_BLEND_ALPHA_SET_ALPHA(v) SCALER_SET(v, 7, 0) + +/* SCALER_FILL_COLOR */ +#define SCALER_FILL_COLOR_GET_ALPHA(r) SCALER_GET(r, 31, 24) +#define SCALER_FILL_COLOR_SET_ALPHA(v) SCALER_SET(v, 31, 24) +#define SCALER_FILL_COLOR_GET_FILL_COLOR0(r) SCALER_GET(r, 23, 16) +#define SCALER_FILL_COLOR_SET_FILL_COLOR0(v) SCALER_SET(v, 23, 16) +#define SCALER_FILL_COLOR_GET_FILL_COLOR1(r) SCALER_GET(r, 15, 8) +#define SCALER_FILL_COLOR_SET_FILL_COLOR1(v) SCALER_SET(v, 15, 8) +#define SCALER_FILL_COLOR_GET_FILL_COLOR2(r) SCALER_GET(r, 7, 0) +#define SCALER_FILL_COLOR_SET_FILL_COLOR2(v) SCALER_SET(v, 7, 0) + +/* SCALER_ADDR_Q_CONFIG */ +#define SCALER_ADDR_Q_CONFIG_RST (1 << 0) + +/* SCALER_SRC_ADDR_Q_STATUS */ +#define SCALER_SRC_ADDR_Q_STATUS_Y_FULL (1 << 23) +#define SCALER_SRC_ADDR_Q_STATUS_Y_EMPTY (1 << 22) +#define SCALER_SRC_ADDR_Q_STATUS_GET_Y_WR_IDX(r) SCALER_GET(r, 21, 16) +#define SCALER_SRC_ADDR_Q_STATUS_CB_FULL (1 << 15) +#define SCALER_SRC_ADDR_Q_STATUS_CB_EMPTY (1 << 14) +#define SCALER_SRC_ADDR_Q_STATUS_GET_CB_WR_IDX(r) SCALER_GET(r, 13, 8) +#define SCALER_SRC_ADDR_Q_STATUS_CR_FULL (1 << 7) +#define SCALER_SRC_ADDR_Q_STATUS_CR_EMPTY (1 << 6) +#define SCALER_SRC_ADDR_Q_STATUS_GET_CR_WR_IDX(r) SCALER_GET(r, 5, 0) + +/* SCALER_DST_ADDR_Q_STATUS */ +#define SCALER_DST_ADDR_Q_STATUS_Y_FULL (1 << 23) +#define SCALER_DST_ADDR_Q_STATUS_Y_EMPTY (1 << 22) +#define SCALER_DST_ADDR_Q_STATUS_GET_Y_WR_IDX(r) SCALER_GET(r, 21, 16) +#define SCALER_DST_ADDR_Q_STATUS_CB_FULL (1 << 15) +#define SCALER_DST_ADDR_Q_STATUS_CB_EMPTY (1 << 14) +#define SCALER_DST_ADDR_Q_STATUS_GET_CB_WR_IDX(r) SCALER_GET(r, 13, 8) +#define SCALER_DST_ADDR_Q_STATUS_CR_FULL (1 << 7) +#define SCALER_DST_ADDR_Q_STATUS_CR_EMPTY (1 << 6) +#define SCALER_DST_ADDR_Q_STATUS_GET_CR_WR_IDX(r) SCALER_GET(r, 5, 0) + +/* SCALER_CRC_COLOR00_10 */ +#define SCALER_CRC_COLOR00_10_GET_00(r) SCALER_GET(r, 31, 16) +#define SCALER_CRC_COLOR00_10_GET_10(r) SCALER_GET(r, 15, 0) + +/* SCALER_CRC_COLOR20_30 */ +#define SCALER_CRC_COLOR20_30_GET_20(r) SCALER_GET(r, 31, 16) +#define SCALER_CRC_COLOR20_30_GET_30(r) SCALER_GET(r, 15, 0) + +/* SCALER_CRC_COLOR01_11 */ +#define 
SCALER_CRC_COLOR01_11_GET_01(r) SCALER_GET(r, 31, 16) +#define SCALER_CRC_COLOR01_11_GET_11(r) SCALER_GET(r, 15, 0) + +/* SCALER_CRC_COLOR21_31 */ +#define SCALER_CRC_COLOR21_31_GET_21(r) SCALER_GET(r, 31, 16) +#define SCALER_CRC_COLOR21_31_GET_31(r) SCALER_GET(r, 15, 0) + +#endif /* EXYNOS_REGS_SCALER_H */ From e8929999fa718da5758ff877592f33fea368ca8a Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Mon, 7 May 2018 11:29:28 +0200 Subject: [PATCH 17/17] drm/exynos/dsi: remove mode_set callback The callback was used only to copy provided mode to context for later usage. Since the mode is always available from crtc atomic state this code can be removed. Signed-off-by: Andrzej Hajda Signed-off-by: Inki Dae --- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 40 +++++++------------------ 1 file changed, 10 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 7904ffa9abfb..eae44fd714f0 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -270,7 +270,6 @@ struct exynos_dsi { u32 lanes; u32 mode_flags; u32 format; - struct videomode vm; int state; struct drm_property *brightness; @@ -881,30 +880,30 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi) static void exynos_dsi_set_display_mode(struct exynos_dsi *dsi) { - struct videomode *vm = &dsi->vm; + struct drm_display_mode *m = &dsi->encoder.crtc->state->adjusted_mode; unsigned int num_bits_resol = dsi->driver_data->num_bits_resol; u32 reg; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) { reg = DSIM_CMD_ALLOW(0xf) - | DSIM_STABLE_VFP(vm->vfront_porch) - | DSIM_MAIN_VBP(vm->vback_porch); + | DSIM_STABLE_VFP(m->vsync_start - m->vdisplay) + | DSIM_MAIN_VBP(m->vtotal - m->vsync_end); exynos_dsi_write(dsi, DSIM_MVPORCH_REG, reg); - reg = DSIM_MAIN_HFP(vm->hfront_porch) - | DSIM_MAIN_HBP(vm->hback_porch); + reg = DSIM_MAIN_HFP(m->hsync_start - m->hdisplay) + | DSIM_MAIN_HBP(m->htotal - m->hsync_end); exynos_dsi_write(dsi, DSIM_MHPORCH_REG, reg); - reg = DSIM_MAIN_VSA(vm->vsync_len) - | DSIM_MAIN_HSA(vm->hsync_len); + reg = DSIM_MAIN_VSA(m->vsync_end - m->vsync_start) + | DSIM_MAIN_HSA(m->hsync_end - m->hsync_start); exynos_dsi_write(dsi, DSIM_MSYNC_REG, reg); } - reg = DSIM_MAIN_HRESOL(vm->hactive, num_bits_resol) | - DSIM_MAIN_VRESOL(vm->vactive, num_bits_resol); + reg = DSIM_MAIN_HRESOL(m->hdisplay, num_bits_resol) | + DSIM_MAIN_VRESOL(m->vdisplay, num_bits_resol); exynos_dsi_write(dsi, DSIM_MDRESOL_REG, reg); - dev_dbg(dsi->dev, "LCD size = %dx%d\n", vm->hactive, vm->vactive); + dev_dbg(dsi->dev, "LCD size = %dx%d\n", m->hdisplay, m->vdisplay); } static void exynos_dsi_set_display_enable(struct exynos_dsi *dsi, bool enable) @@ -1485,26 +1484,7 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder) return 0; } -static void exynos_dsi_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - struct exynos_dsi *dsi = encoder_to_dsi(encoder); - struct videomode *vm = &dsi->vm; - struct drm_display_mode *m = adjusted_mode; - - vm->hactive = m->hdisplay; - vm->vactive = m->vdisplay; - vm->vfront_porch = m->vsync_start - m->vdisplay; - vm->vback_porch = m->vtotal - m->vsync_end; - vm->vsync_len = m->vsync_end - m->vsync_start; - vm->hfront_porch = m->hsync_start - m->hdisplay; - vm->hback_porch = m->htotal - m->hsync_end; - vm->hsync_len = m->hsync_end - m->hsync_start; -} - static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = { - 
.mode_set = exynos_dsi_mode_set, .enable = exynos_dsi_enable, .disable = exynos_dsi_disable, };
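
Note (illustration only, not part of the series): the DSIM timing values programmed inline above are the standard porch and sync-length derivations from struct drm_display_mode, which is why the separate videomode copy in the removed mode_set callback is no longer needed. A minimal sketch under that assumption follows; the helper name and debug print are made up for illustration and assume normal kernel context.

/*
 * Sketch: how the values written to DSIM_MVPORCH/MHPORCH/MSYNC derive
 * from the adjusted display mode. Not a real driver function.
 */
static void example_dsi_timings(const struct drm_display_mode *m)
{
	unsigned int vfp = m->vsync_start - m->vdisplay;	/* STABLE_VFP */
	unsigned int vbp = m->vtotal - m->vsync_end;		/* MAIN_VBP */
	unsigned int vsa = m->vsync_end - m->vsync_start;	/* MAIN_VSA */
	unsigned int hfp = m->hsync_start - m->hdisplay;	/* MAIN_HFP */
	unsigned int hbp = m->htotal - m->hsync_end;		/* MAIN_HBP */
	unsigned int hsa = m->hsync_end - m->hsync_start;	/* MAIN_HSA */

	pr_debug("hfp/hsa/hbp %u/%u/%u vfp/vsa/vbp %u/%u/%u\n",
		 hfp, hsa, hbp, vfp, vsa, vbp);
}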