Merge tag 'drm-misc-next-2020-09-21' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.10:

UAPI Changes:

Cross-subsystem Changes:
  - virtio: Merged a PR for patches that will affect drm/virtio

Core Changes:
  - dev: More devm_drm conversions and removal of drm_dev_init
  - atomic: Split drm_atomic_helper_calc_timestamping_constants out of
    drm_atomic_helper_update_legacy_modeset_state (see the sketch below)
  - ttm: More rework
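
  A minimal sketch of the resulting calling convention, assuming a
  hypothetical driver's commit-tail helper (the foo_* names are
  placeholders; the amdgpu_dm hunk below shows the real conversion):

    static void foo_atomic_commit_tail(struct drm_atomic_state *state)
    {
            struct drm_device *dev = state->dev;

            /* This helper no longer updates the vblank timestamping
             * constants as a side effect... */
            drm_atomic_helper_update_legacy_modeset_state(dev, state);
            /* ...so drivers that relied on that now call the split-out
             * helper explicitly. */
            drm_atomic_helper_calc_timestamping_constants(state);
    }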

Driver Changes:
  - i915: selftests improvements
  - panfrost: support for Amlogic SoC
  - vc4: one fix
  - tree-wide: conversions to devm_drm_dev_alloc (see the sketch below)
  - ast: simplifications of the atomic modesetting code
  - panfrost: multiple fixes
  - vc4: multiple fixes
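
  A minimal sketch of the devm_drm_dev_alloc() pattern these conversions
  adopt (the foo_* names are placeholders; the armada and amdgpu hunks
  below show real conversions):

    struct foo_device {
            struct drm_device drm;  /* embedded DRM device */
    };

    static int foo_probe(struct platform_device *pdev)
    {
            struct foo_device *foo;

            /* Allocates foo with foo->drm already initialized; the
             * allocation is device-managed, so the drm_dev_put() calls
             * in probe error paths and in remove go away. */
            foo = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
                                     struct foo_device, drm);
            if (IS_ERR(foo))
                    return PTR_ERR(foo);

            return drm_dev_register(&foo->drm, 0);
    }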
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20200921152956.2gxnsdgxmwhvjyut@gilmour.lan
commit 6ea6be7708
244 changed files with 13196 additions and 5521 deletions


@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/brcm,bcm2711-hdmi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM2711 HDMI Controller Device Tree Bindings
+
+maintainers:
+  - Eric Anholt <eric@anholt.net>
+
+properties:
+  compatible:
+    enum:
+      - brcm,bcm2711-hdmi0
+      - brcm,bcm2711-hdmi1
+
+  reg:
+    items:
+      - description: HDMI controller register range
+      - description: DVP register range
+      - description: HDMI PHY register range
+      - description: Rate Manager register range
+      - description: Packet RAM register range
+      - description: Metadata RAM register range
+      - description: CSC register range
+      - description: CEC register range
+      - description: HD register range
+
+  reg-names:
+    items:
+      - const: hdmi
+      - const: dvp
+      - const: phy
+      - const: rm
+      - const: packet
+      - const: metadata
+      - const: csc
+      - const: cec
+      - const: hd
+
+  clocks:
+    items:
+      - description: The HDMI state machine clock
+      - description: The Pixel BVB clock
+      - description: The HDMI Audio parent clock
+      - description: The HDMI CEC parent clock
+
+  clock-names:
+    items:
+      - const: hdmi
+      - const: bvb
+      - const: audio
+      - const: cec
+
+  ddc:
+    allOf:
+      - $ref: /schemas/types.yaml#/definitions/phandle
+    description: >
+      Phandle of the I2C controller used for DDC EDID probing
+
+  hpd-gpios:
+    description: >
+      The GPIO pin for the HDMI hotplug detect (if it doesn't appear
+      as an interrupt/status bit in the HDMI controller itself)
+
+  dmas:
+    maxItems: 1
+    description: >
+      Should contain one entry pointing to the DMA channel used to
+      transfer audio data.
+
+  dma-names:
+    const: audio-rx
+
+  resets:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - clocks
+  - resets
+  - ddc
+
+additionalProperties: false
+
+examples:
+  - |
+    hdmi0: hdmi@7ef00700 {
+        compatible = "brcm,bcm2711-hdmi0";
+        reg = <0x7ef00700 0x300>,
+              <0x7ef00300 0x200>,
+              <0x7ef00f00 0x80>,
+              <0x7ef00f80 0x80>,
+              <0x7ef01b00 0x200>,
+              <0x7ef01f00 0x400>,
+              <0x7ef00200 0x80>,
+              <0x7ef04300 0x100>,
+              <0x7ef20000 0x100>;
+        reg-names = "hdmi",
+                    "dvp",
+                    "phy",
+                    "rm",
+                    "packet",
+                    "metadata",
+                    "csc",
+                    "cec",
+                    "hd";
+        clocks = <&firmware_clocks 13>, <&firmware_clocks 14>, <&dvp 1>, <&clk_27MHz>;
+        clock-names = "hdmi", "bvb", "audio", "cec";
+        resets = <&dvp 0>;
+        ddc = <&ddc0>;
+    };
+
+...


@@ -11,7 +11,9 @@ maintainers:
 properties:
   compatible:
-    const: brcm,bcm2835-hvs
+    enum:
+      - brcm,bcm2711-hvs
+      - brcm,bcm2835-hvs
 
   reg:
     maxItems: 1
@@ -19,6 +21,10 @@ properties:
   interrupts:
     maxItems: 1
 
+  clocks:
+    maxItems: 1
+    description: Core Clock
+
 required:
   - compatible
   - reg
@@ -26,6 +32,16 @@ required:
 
 additionalProperties: false
 
+if:
+  properties:
+    compatible:
+      contains:
+        const: brcm,bcm2711-hvs
+then:
+  required:
+    - clocks
+
 examples:
   - |
     hvs@7e400000 {


@@ -15,6 +15,11 @@ properties:
       - brcm,bcm2835-pixelvalve0
       - brcm,bcm2835-pixelvalve1
       - brcm,bcm2835-pixelvalve2
+      - brcm,bcm2711-pixelvalve0
+      - brcm,bcm2711-pixelvalve1
+      - brcm,bcm2711-pixelvalve2
+      - brcm,bcm2711-pixelvalve3
+      - brcm,bcm2711-pixelvalve4
 
   reg:
     maxItems: 1


@@ -17,6 +17,7 @@ description: >
 properties:
   compatible:
     enum:
+      - brcm,bcm2711-vc5
       - brcm,bcm2835-vc4
       - brcm,cygnus-vc4


@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 NXP
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/imx/nxp,imx8mq-dcss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: iMX8MQ Display Controller Subsystem (DCSS)
+
+maintainers:
+  - Laurentiu Palcu <laurentiu.palcu@nxp.com>
+
+description:
+  The DCSS (display controller sub system) is used to source up to three
+  display buffers, compose them, and drive a display using HDMI 2.0a (with
+  HDCP 2.2) or MIPI-DSI. The DCSS is intended to support up to 4kp60
+  displays. HDR10 image processing capabilities are included to provide a
+  solution capable of driving next generation high dynamic range displays.
+
+properties:
+  compatible:
+    const: nxp,imx8mq-dcss
+
+  reg:
+    items:
+      - description: DCSS base address and size, up to IRQ steer start
+      - description: DCSS BLKCTL base address and size
+
+  interrupts:
+    items:
+      - description: Context loader completion and error interrupt
+      - description: DTG interrupt used to signal context loader trigger time
+      - description: DTG interrupt for Vblank
+
+  interrupt-names:
+    items:
+      - const: ctxld
+      - const: ctxld_kick
+      - const: vblank
+
+  clocks:
+    items:
+      - description: Display APB clock for all peripheral PIO access interfaces
+      - description: Display AXI clock needed by DPR, Scaler, RTRAM_CTRL
+      - description: RTRAM clock
+      - description: Pixel clock, can be driven either by HDMI phy clock or MIPI
+      - description: DTRC clock, needed by video decompressor
+
+  clock-names:
+    items:
+      - const: apb
+      - const: axi
+      - const: rtrm
+      - const: pix
+      - const: dtrc
+
+  assigned-clocks:
+    items:
+      - description: Phandle and clock specifier of IMX8MQ_CLK_DISP_AXI_ROOT
+      - description: Phandle and clock specifier of IMX8MQ_CLK_DISP_RTRM
+      - description: Phandle and clock specifier of either IMX8MQ_VIDEO2_PLL1_REF_SEL or
+          IMX8MQ_VIDEO_PLL1_REF_SEL
+
+  assigned-clock-parents:
+    items:
+      - description: Phandle and clock specifier of IMX8MQ_SYS1_PLL_800M
+      - description: Phandle and clock specifier of IMX8MQ_SYS1_PLL_800M
+      - description: Phandle and clock specifier of IMX8MQ_CLK_27M
+
+  assigned-clock-rates:
+    items:
+      - description: Must be 800 MHz
+      - description: Must be 400 MHz
+
+  port:
+    type: object
+    description:
+      A port node pointing to the input port of a HDMI/DP or MIPI display bridge.
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/imx8mq-clock.h>
+
+    dcss: display-controller@32e00000 {
+        compatible = "nxp,imx8mq-dcss";
+        reg = <0x32e00000 0x2d000>, <0x32e2f000 0x1000>;
+        interrupts = <6>, <8>, <9>;
+        interrupt-names = "ctxld", "ctxld_kick", "vblank";
+        interrupt-parent = <&irqsteer>;
+        clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>, <&clk IMX8MQ_CLK_DISP_AXI_ROOT>,
+                 <&clk IMX8MQ_CLK_DISP_RTRM_ROOT>, <&clk IMX8MQ_VIDEO2_PLL_OUT>,
+                 <&clk IMX8MQ_CLK_DISP_DTRC>;
+        clock-names = "apb", "axi", "rtrm", "pix", "dtrc";
+        assigned-clocks = <&clk IMX8MQ_CLK_DISP_AXI>, <&clk IMX8MQ_CLK_DISP_RTRM>,
+                          <&clk IMX8MQ_VIDEO2_PLL1_REF_SEL>;
+        assigned-clock-parents = <&clk IMX8MQ_SYS1_PLL_800M>, <&clk IMX8MQ_SYS1_PLL_800M>,
+                                 <&clk IMX8MQ_CLK_27M>;
+        assigned-clock-rates = <800000000>,
+                               <400000000>;
+        port {
+            dcss_out: endpoint {
+                remote-endpoint = <&hdmi_in>;
+            };
+        };
+    };


@@ -19,6 +19,7 @@ Optional properties:
 - vbat-supply: The supply for VBAT
 - solomon,segment-no-remap: Display needs normal (non-inverted) data column
                             to segment mapping
+- solomon,col-offset: Offset of columns (COL/SEG) that the screen is mapped to.
 - solomon,com-seq: Display uses sequential COM pin configuration
 - solomon,com-lrremap: Display uses left-right COM pin remap
 - solomon,com-invdir: Display uses inverted COM pin scan direction


@@ -263,7 +263,7 @@ DMA
   dmam_pool_destroy()
 
 DRM
-  devm_drm_dev_init()
+  devm_drm_dev_alloc()
 
 GPIO
   devm_gpiod_get()


@@ -20,8 +20,8 @@ A. Configuration
 ================
 
 The framebuffer console can be enabled by using your favorite kernel
-configuration tool.  It is under Device Drivers->Graphics Support->Frame
-buffer Devices->Console display driver support->Framebuffer Console Support.
+configuration tool.  It is under Device Drivers->Graphics Support->
+Console display driver support->Framebuffer Console Support.
 Select 'y' to compile support statically or 'm' for module support.  The
 module will be fbcon.


@@ -356,8 +356,6 @@ Code  Seq#    Include File                                 Comments
 0xEC  00-01  drivers/platform/chrome/cros_ec_dev.h        ChromeOS EC driver
 0xF3  00-3F  drivers/usb/misc/sisusbvga/sisusb.h          sisfb (in development)
                                                           <mailto:thomas@winischhofer.net>
-0xF4  00-1F  video/mbxfb.h                                mbxfb
-                                                          <mailto:raph@8d.com>
 0xF6  all                                                 LTTng Linux Trace Toolkit Next Generation
                                                           <mailto:mathieu.desnoyers@efficios.com>
 0xFD  all    linux/dm-ioctl.h


@@ -5411,7 +5411,7 @@ F:	drivers/gpu/drm/panel/panel-arm-versatile.c
 DRM DRIVER FOR ASPEED BMC GFX
 M:	Joel Stanley <joel@jms.id.au>
-L:	linux-aspeed@lists.ozlabs.org
+L:	linux-aspeed@lists.ozlabs.org (moderated for non-subscribers)
 S:	Supported
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 F:	Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
@@ -5419,7 +5419,10 @@ F:	drivers/gpu/drm/aspeed/
 DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
 M:	Dave Airlie <airlied@redhat.com>
-S:	Odd Fixes
+R:	Thomas Zimmermann <tzimmermann@suse.de>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+T:	git git://anongit.freedesktop.org/drm/drm-misc
 F:	drivers/gpu/drm/ast/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
@@ -5507,7 +5510,10 @@ F:	include/uapi/drm/mga_drm.h
 DRM DRIVER FOR MGA G200 GRAPHICS CHIPS
 M:	Dave Airlie <airlied@redhat.com>
-S:	Odd Fixes
+R:	Thomas Zimmermann <tzimmermann@suse.de>
+L:	dri-devel@lists.freedesktop.org
+S:	Supported
+T:	git git://anongit.freedesktop.org/drm/drm-misc
 F:	drivers/gpu/drm/mgag200/
 
 DRM DRIVER FOR MI0283QT
@@ -5652,13 +5658,15 @@ F:	drivers/gpu/drm/panel/panel-tpo-tpg110.c
 DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
 M:	Dave Airlie <airlied@redhat.com>
 R:	Sean Paul <sean@poorly.run>
+R:	Thomas Zimmermann <tzimmermann@suse.de>
 L:	dri-devel@lists.freedesktop.org
-S:	Odd Fixes
+S:	Supported
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 F:	drivers/gpu/drm/udl/
 
 DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
 M:	Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
+M:	Melissa Wen <melissa.srw@gmail.com>
 R:	Haneen Mohammed <hamohammed.sa@gmail.com>
 R:	Daniel Vetter <daniel@ffwll.ch>
 L:	dri-devel@lists.freedesktop.org
@@ -12462,6 +12470,14 @@ F:	drivers/iio/gyro/fxas21002c_core.c
 F:	drivers/iio/gyro/fxas21002c_i2c.c
 F:	drivers/iio/gyro/fxas21002c_spi.c
 
+NXP i.MX 8MQ DCSS DRIVER
+M:	Laurentiu Palcu <laurentiu.palcu@oss.nxp.com>
+R:	Lucas Stach <l.stach@pengutronix.de>
+L:	dri-devel@lists.freedesktop.org
+S:	Maintained
+F:	Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
+F:	drivers/gpu/drm/imx/dcss/
+
 NXP SGTL5000 DRIVER
 M:	Fabio Estevam <festevam@gmail.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)


@@ -283,6 +283,7 @@ EXPORT_SYMBOL(dma_fence_begin_signalling);
 
 /**
  * dma_fence_end_signalling - end a critical DMA fence signalling section
+ * @cookie: opaque cookie from dma_fence_begin_signalling()
  *
  * Closes a critical section annotation opened by dma_fence_begin_signalling().
  */


@@ -98,12 +98,14 @@ static int __init dma_resv_lockdep(void)
 	struct mm_struct *mm = mm_alloc();
 	struct ww_acquire_ctx ctx;
 	struct dma_resv obj;
+	struct address_space mapping;
 	int ret;
 
 	if (!mm)
 		return -ENOMEM;
 
 	dma_resv_init(&obj);
+	address_space_init_once(&mapping);
 
 	mmap_read_lock(mm);
 	ww_acquire_init(&ctx, &reservation_ww_class);
@@ -111,6 +113,9 @@ static int __init dma_resv_lockdep(void)
 	if (ret == -EDEADLK)
 		dma_resv_lock_slow(&obj, &ctx);
 	fs_reclaim_acquire(GFP_KERNEL);
+	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+	i_mmap_lock_write(&mapping);
+	i_mmap_unlock_write(&mapping);
 #ifdef CONFIG_MMU_NOTIFIER
 	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
 	__dma_fence_might_wait();


@@ -307,6 +307,9 @@ static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
 static const struct file_operations udmabuf_fops = {
 	.owner		= THIS_MODULE,
 	.unlocked_ioctl = udmabuf_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = udmabuf_ioctl,
+#endif
 };
 
 static struct miscdevice udmabuf_misc = {


@@ -100,7 +100,7 @@ obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_STM) += stm/
 obj-$(CONFIG_DRM_STI) += sti/
-obj-$(CONFIG_DRM_IMX) += imx/
+obj-y += imx/
 obj-$(CONFIG_DRM_INGENIC) += ingenic/
 obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
 obj-$(CONFIG_DRM_MESON) += meson/


@@ -303,7 +303,8 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 	switch (bo->tbo.mem.mem_type) {
 	case TTM_PL_TT:
-		sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+		sgt = drm_prime_pages_to_sg(obj->dev,
+					    bo->tbo.ttm->pages,
 					    bo->tbo.num_pages);
 		if (IS_ERR(sgt))
 			return sgt;


@@ -1159,25 +1159,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;
 
-	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
-	if (!adev)
-		return -ENOMEM;
+	adev = devm_drm_dev_alloc(&pdev->dev, &kms_driver, typeof(*adev), ddev);
+	if (IS_ERR(adev))
+		return PTR_ERR(adev);
 
 	adev->dev  = &pdev->dev;
 	adev->pdev = pdev;
 	ddev = adev_to_drm(adev);
-	ret = drm_dev_init(ddev, &kms_driver, &pdev->dev);
-	if (ret)
-		goto err_free;
-
-	drmm_add_final_kfree(ddev, adev);
 
 	if (!supports_atomic)
 		ddev->driver_features &= ~DRIVER_ATOMIC;
 
 	ret = pci_enable_device(pdev);
 	if (ret)
-		goto err_free;
+		return ret;
 
 	ddev->pdev = pdev;
 	pci_set_drvdata(pdev, ddev);
@@ -1205,8 +1200,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 
 err_pci:
 	pci_disable_device(pdev);
-err_free:
-	drm_dev_put(ddev);
 	return ret;
 }
 
@@ -1223,7 +1216,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 	amdgpu_driver_unload_kms(dev);
 	pci_disable_device(pdev);
 	pci_set_drvdata(pdev, NULL);
-	drm_dev_put(dev);
 }
 
 static void


@@ -95,8 +95,6 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
 
 	man->use_tt = true;
 	man->func = &amdgpu_gtt_mgr_func;
-	man->available_caching = TTM_PL_MASK_CACHING;
-	man->default_caching = TTM_PL_FLAG_CACHED;
 
 	ttm_resource_manager_init(man, gtt_size >> PAGE_SHIFT);


@@ -136,8 +136,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-			TTM_PL_FLAG_VRAM;
+		places[c].mem_type = TTM_PL_VRAM;
+		places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
 
 		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
 			places[c].lpfn = visible_pfn;
@@ -152,7 +152,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_TT;
+		places[c].mem_type = TTM_PL_TT;
+		places[c].flags = 0;
 		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 			places[c].flags |= TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED;
@@ -164,7 +165,8 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_SYSTEM;
+		places[c].mem_type = TTM_PL_SYSTEM;
+		places[c].flags = 0;
 		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 			places[c].flags |= TTM_PL_FLAG_WC |
 				TTM_PL_FLAG_UNCACHED;
@@ -176,28 +178,32 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+		places[c].mem_type = AMDGPU_PL_GDS;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
 		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+		places[c].mem_type = AMDGPU_PL_GWS;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
 		c++;
 	}
 
 	if (domain & AMDGPU_GEM_DOMAIN_OA) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+		places[c].mem_type = AMDGPU_PL_OA;
+		places[c].flags = TTM_PL_FLAG_UNCACHED;
 		c++;
 	}
 
 	if (!c) {
 		places[c].fpfn = 0;
 		places[c].lpfn = 0;
-		places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+		places[c].mem_type = TTM_PL_SYSTEM;
+		places[c].flags = TTM_PL_MASK_CACHING;
 		c++;
 	}
 
@@ -594,7 +600,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
 	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
 		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);


@@ -63,12 +63,15 @@
 
 #define AMDGPU_TTM_VRAM_MAX_DW_READ	(size_t)128
 
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+				   struct ttm_tt *ttm,
+				   struct ttm_resource *bo_mem);
+
 static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
 				    unsigned int type,
 				    uint64_t size)
 {
 	return ttm_range_man_init(&adev->mman.bdev, type,
-				  TTM_PL_FLAG_UNCACHED, TTM_PL_FLAG_UNCACHED,
 				  false, size >> PAGE_SHIFT);
 }
@@ -88,7 +91,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 	static const struct ttm_place placements = {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+		.mem_type = TTM_PL_SYSTEM,
+		.flags = TTM_PL_MASK_CACHING
 	};
 
 	/* Don't handle scatter gather BOs */
@@ -174,24 +178,6 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 					  filp->private_data);
 }
 
-/**
- * amdgpu_move_null - Register memory for a buffer object
- *
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
- *
- * Assign the memory from new_mem to the memory of the buffer object bo.
- */
-static void amdgpu_move_null(struct ttm_buffer_object *bo,
-			     struct ttm_resource *new_mem)
-{
-	struct ttm_resource *old_mem = &bo->mem;
-
-	BUG_ON(old_mem->mm_node != NULL);
-	*old_mem = *new_mem;
-	new_mem->mm_node = NULL;
-}
-
 /**
  * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
  *
@@ -514,9 +500,9 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 
 	/* Always block for VM page tables before committing the new location */
 	if (bo->type == ttm_bo_type_kernel)
-		r = ttm_bo_move_accel_cleanup(bo, fence, true, new_mem);
+		r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
 	else
-		r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+		r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 	dma_fence_put(fence);
 	return r;
@@ -551,7 +537,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = 0;
-	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = TTM_PL_MASK_CACHING;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		pr_err("Failed to find GTT space for blit from VRAM\n");
@@ -564,8 +551,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
 		goto out_cleanup;
 	}
 
+	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+	if (unlikely(r))
+		goto out_cleanup;
+
 	/* Bind the memory to the GTT space */
-	r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx);
+	r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
 	if (unlikely(r)) {
 		goto out_cleanup;
 	}
@@ -607,7 +598,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = 0;
-	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = TTM_PL_MASK_CACHING;
 	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
 	if (unlikely(r)) {
 		pr_err("Failed to find GTT space for blit to VRAM\n");
@@ -676,7 +668,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 	adev = amdgpu_ttm_adev(bo->bdev);
 
 	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
-		amdgpu_move_null(bo, new_mem);
+		ttm_bo_move_null(bo, new_mem);
 		return 0;
 	}
 	if ((old_mem->mem_type == TTM_PL_TT &&
@@ -684,7 +676,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 	    (old_mem->mem_type == TTM_PL_SYSTEM &&
 	     new_mem->mem_type == TTM_PL_TT)) {
 		/* bind is enough */
-		amdgpu_move_null(bo, new_mem);
+		ttm_bo_move_null(bo, new_mem);
 		return 0;
 	}
 	if (old_mem->mem_type == AMDGPU_PL_GDS ||
@@ -694,7 +686,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 	    new_mem->mem_type == AMDGPU_PL_GWS ||
 	    new_mem->mem_type == AMDGPU_PL_OA) {
 		/* Nothing to save here */
-		amdgpu_move_null(bo, new_mem);
+		ttm_bo_move_null(bo, new_mem);
 		return 0;
 	}
 
@@ -773,7 +765,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
 		mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
 			mem->bus.offset;
 
-		mem->bus.base = adev->gmc.aper_base;
+		mem->bus.offset += adev->gmc.aper_base;
 		mem->bus.is_iomem = true;
 		break;
 	default:
@@ -785,12 +777,13 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
 static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 					   unsigned long page_offset)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	uint64_t offset = (page_offset << PAGE_SHIFT);
 	struct drm_mm_node *mm;
 
 	mm = amdgpu_find_mm_node(&bo->mem, &offset);
-	return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
-		(offset >> PAGE_SHIFT);
+	offset += adev->gmc.aper_base;
+	return mm->start + (offset >> PAGE_SHIFT);
 }
 
 /**
@@ -824,6 +817,7 @@ struct amdgpu_ttm_tt {
 	uint64_t	userptr;
 	struct task_struct	*usertask;
 	uint32_t	userflags;
+	bool	bound;
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 	struct hmm_range	*range;
 #endif
@@ -991,9 +985,10 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
  *
  * Called by amdgpu_ttm_backend_bind()
  **/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+				     struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int r;
 
@@ -1028,9 +1023,10 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 /**
  * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
  */
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+					struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
 
@@ -1111,16 +1107,23 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
  * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
  * This handles binding GTT memory to the device address space.
  */
-static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
+static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+				   struct ttm_tt *ttm,
 				   struct ttm_resource *bo_mem)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void*)ttm;
 	uint64_t flags;
 	int r = 0;
 
+	if (!bo_mem)
+		return -EINVAL;
+
+	if (gtt->bound)
+		return 0;
+
 	if (gtt->userptr) {
-		r = amdgpu_ttm_tt_pin_userptr(ttm);
+		r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
 		if (r) {
 			DRM_ERROR("failed to pin userptr\n");
 			return r;
@@ -1152,6 +1155,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
 	if (r)
 		DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
 			  ttm->num_pages, gtt->offset);
+	gtt->bound = true;
 	return r;
 }
 
@@ -1191,8 +1195,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	placement.busy_placement = &placements;
 	placements.fpfn = 0;
 	placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-	placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
-		TTM_PL_FLAG_TT;
+	placements.mem_type = TTM_PL_TT;
+	placements.flags = bo->mem.placement;
 
 	r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
 	if (unlikely(r))
@@ -1243,15 +1247,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
 * ttm_tt_destroy().
 */
-static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+				      struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	int r;
 
+	if (!gtt->bound)
+		return;
+
 	/* if the pages have userptr pinning then clear that first */
 	if (gtt->userptr)
-		amdgpu_ttm_tt_unpin_userptr(ttm);
+		amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
 
 	if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
 		return;
@@ -1261,12 +1269,16 @@ static void amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
 	if (r)
 		DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
 			  gtt->ttm.ttm.num_pages, gtt->offset);
+	gtt->bound = false;
 }
 
-static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+				       struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
+	amdgpu_ttm_backend_unbind(bdev, ttm);
+	ttm_tt_destroy_common(bdev, ttm);
+
 	if (gtt->usertask)
 		put_task_struct(gtt->usertask);
 
@@ -1274,12 +1286,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
 	kfree(gtt);
 }
 
-static struct ttm_backend_func amdgpu_backend_func = {
-	.bind = &amdgpu_ttm_backend_bind,
-	.unbind = &amdgpu_ttm_backend_unbind,
-	.destroy = &amdgpu_ttm_backend_destroy,
-};
-
 /**
 * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
 *
@@ -1296,7 +1302,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (gtt == NULL) {
 		return NULL;
 	}
-	gtt->ttm.ttm.func = &amdgpu_backend_func;
 	gtt->gobj = &bo->base;
 
 	/* allocate space for the uninitialized page entries */
@@ -1313,10 +1318,11 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
-			struct ttm_operation_ctx *ctx)
+static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+				  struct ttm_tt *ttm,
+				  struct ttm_operation_ctx *ctx)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
 	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
@@ -1326,7 +1332,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 			return -ENOMEM;
 
 		ttm->page_flags |= TTM_PAGE_FLAG_SG;
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
@@ -1346,7 +1352,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 gtt->ttm.dma_address,
 						 ttm->num_pages);
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
@@ -1367,7 +1373,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 */
-static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct amdgpu_device *adev;
@@ -1391,7 +1397,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
 		return;
 
-	adev = amdgpu_ttm_adev(ttm->bdev);
+	adev = amdgpu_ttm_adev(bdev);
 
 #ifdef CONFIG_SWIOTLB
 	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
@@ -1697,6 +1703,9 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
 	.ttm_tt_create = &amdgpu_ttm_tt_create,
 	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
 	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
+	.ttm_tt_bind = &amdgpu_ttm_backend_bind,
+	.ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
+	.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
 	.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
 	.evict_flags = &amdgpu_evict_flags,
 	.move = &amdgpu_bo_move,


@@ -32,10 +32,6 @@
 #define AMDGPU_PL_GWS		(TTM_PL_PRIV + 1)
 #define AMDGPU_PL_OA		(TTM_PL_PRIV + 2)
 
-#define AMDGPU_PL_FLAG_GDS	(TTM_PL_FLAG_PRIV << 0)
-#define AMDGPU_PL_FLAG_GWS	(TTM_PL_FLAG_PRIV << 1)
-#define AMDGPU_PL_FLAG_OA	(TTM_PL_FLAG_PRIV << 2)
-
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE	512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS	2


@@ -179,9 +179,6 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
 	struct ttm_resource_manager *man = &mgr->manager;
 	int ret;
 
-	man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
-	man->default_caching = TTM_PL_FLAG_WC;
-
 	ttm_resource_manager_init(man, adev->gmc.real_vram_size >> PAGE_SHIFT);
 
 	man->func = &amdgpu_vram_mgr_func;


@@ -7494,6 +7494,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	bool mode_set_reset_required = false;
 
 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
+	drm_atomic_helper_calc_timestamping_constants(state);
 
 	dm_state = dm_atomic_get_new_state(state);
 	if (dm_state && dm_state->context) {


@@ -757,7 +757,7 @@ static int armada_drm_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-	struct armada_private *priv = crtc->dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(crtc->dev);
 
 	if (dcrtc->cursor_obj)
 		drm_gem_object_put(&dcrtc->cursor_obj->obj);
@@ -901,7 +901,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
 	struct resource *res, int irq, const struct armada_variant *variant,
 	struct device_node *port)
 {
-	struct armada_private *priv = drm->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(drm);
 	struct armada_crtc *dcrtc;
 	struct drm_plane *primary;
 	void __iomem *base;


@@ -19,7 +19,7 @@ static int armada_debugfs_gem_linear_show(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(dev);
 	struct drm_printer p = drm_seq_file_printer(m);
 
 	mutex_lock(&priv->linear_lock);


@@ -73,6 +73,8 @@ struct armada_private {
 #endif
 };
 
+#define drm_to_armada_dev(dev) container_of(dev, struct armada_private, drm)
+
 int armada_fbdev_init(struct drm_device *);
 void armada_fbdev_fini(struct drm_device *);


@@ -87,24 +87,13 @@ static int armada_drm_bind(struct device *dev)
 			     "armada-drm"))
 		return -EBUSY;
 
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	/*
-	 * The drm_device structure must be at the start of
-	 * armada_private for drm_dev_put() to work correctly.
-	 */
-	BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
-
-	ret = drm_dev_init(&priv->drm, &armada_drm_driver, dev);
-	if (ret) {
-		dev_err(dev, "[" DRM_NAME ":%s] drm_dev_init failed: %d\n",
-			__func__, ret);
-		kfree(priv);
-		return ret;
+	priv = devm_drm_dev_alloc(dev, &armada_drm_driver,
+				  struct armada_private, drm);
+	if (IS_ERR(priv)) {
+		dev_err(dev, "[" DRM_NAME ":%s] devm_drm_dev_alloc failed: %li\n",
+			__func__, PTR_ERR(priv));
+		return PTR_ERR(priv);
 	}
 
-	drmm_add_final_kfree(&priv->drm, priv);
-
 	/* Remove early framebuffers */
 	ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
@@ -117,8 +106,6 @@ static int armada_drm_bind(struct device *dev)
 		return ret;
 	}
 
-	priv->drm.dev_private = priv;
-
 	dev_set_drvdata(dev, &priv->drm);
 
 	/* Mode setting support */
@@ -174,14 +161,13 @@ static int armada_drm_bind(struct device *dev)
  err_kms:
 	drm_mode_config_cleanup(&priv->drm);
 	drm_mm_takedown(&priv->linear);
-	drm_dev_put(&priv->drm);
 	return ret;
 }
 
 static void armada_drm_unbind(struct device *dev)
 {
 	struct drm_device *drm = dev_get_drvdata(dev);
-	struct armada_private *priv = drm->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(drm);
 
 	drm_kms_helper_poll_fini(&priv->drm);
 	armada_fbdev_fini(&priv->drm);
@@ -194,8 +180,6 @@ static void armada_drm_unbind(struct device *dev)
 
 	drm_mode_config_cleanup(&priv->drm);
 	drm_mm_takedown(&priv->linear);
-
-	drm_dev_put(&priv->drm);
 }
 
 static int compare_of(struct device *dev, void *data)


@@ -117,7 +117,7 @@ static const struct drm_fb_helper_funcs armada_fb_helper_funcs = {
 
 int armada_fbdev_init(struct drm_device *dev)
 {
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(dev);
 	struct drm_fb_helper *fbh;
 	int ret;
 
@@ -151,7 +151,7 @@ int armada_fbdev_init(struct drm_device *dev)
 
 void armada_fbdev_fini(struct drm_device *dev)
 {
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(dev);
 	struct drm_fb_helper *fbh = priv->fbdev;
 
 	if (fbh) {


@@ -39,7 +39,7 @@ static size_t roundup_gem_size(size_t size)
 void armada_gem_free_object(struct drm_gem_object *obj)
 {
 	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
-	struct armada_private *priv = obj->dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(obj->dev);
 
 	DRM_DEBUG_DRIVER("release obj %p\n", dobj);
 
@@ -77,7 +77,7 @@ void armada_gem_free_object(struct drm_gem_object *obj)
 int
 armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
 {
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(dev);
 	size_t size = obj->obj.size;
 
 	if (obj->page || obj->linear)


@@ -344,7 +344,7 @@ static int armada_overlay_set_property(struct drm_plane *plane,
 	struct drm_plane_state *state, struct drm_property *property,
 	uint64_t val)
 {
-	struct armada_private *priv = plane->dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(plane->dev);
 
 #define K2R(val) (((val) >> 0) & 0xff)
 #define K2G(val) (((val) >> 8) & 0xff)
@@ -412,7 +412,7 @@ static int armada_overlay_get_property(struct drm_plane *plane,
 	const struct drm_plane_state *state, struct drm_property *property,
 	uint64_t *val)
 {
-	struct armada_private *priv = plane->dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(plane->dev);
 
 #define C2K(c,s)	(((c) >> (s)) & 0xff)
 #define R2BGR(r,g,b,s)	(C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16)
@@ -505,7 +505,7 @@ static const struct drm_prop_enum_list armada_drm_colorkey_enum_list[] = {
 
 static int armada_overlay_create_properties(struct drm_device *dev)
 {
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(dev);
 
 	if (priv->colorkey_prop)
 		return 0;
@@ -539,7 +539,7 @@ static int armada_overlay_create_properties(struct drm_device *dev)
 
 int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
 {
-	struct armada_private *priv = dev->dev_private;
+	struct armada_private *priv = drm_to_armada_dev(dev);
 	struct drm_mode_object *mobj;
 	struct drm_plane *overlay;
 	int ret;


@@ -63,15 +63,21 @@ static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = {
 	.atomic_commit		= drm_atomic_helper_commit,
 };
 
-static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
+static int aspeed_gfx_setup_mode_config(struct drm_device *drm)
 {
-	drm_mode_config_init(drm);
+	int ret;
+
+	ret = drmm_mode_config_init(drm);
+	if (ret)
+		return ret;
 
 	drm->mode_config.min_width = 0;
 	drm->mode_config.min_height = 0;
 	drm->mode_config.max_width = 800;
 	drm->mode_config.max_height = 600;
 	drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs;
+
+	return ret;
 }
 
 static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
@@ -144,7 +150,9 @@ static int aspeed_gfx_load(struct drm_device *drm)
 	writel(0, priv->base + CRT_CTRL1);
 	writel(0, priv->base + CRT_CTRL2);
 
-	aspeed_gfx_setup_mode_config(drm);
+	ret = aspeed_gfx_setup_mode_config(drm);
+	if (ret < 0)
+		return ret;
 
 	ret = drm_vblank_init(drm, 1);
 	if (ret < 0) {
@@ -179,7 +187,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
 static void aspeed_gfx_unload(struct drm_device *drm)
 {
 	drm_kms_helper_poll_fini(drm);
-	drm_mode_config_cleanup(drm);
 }
 
 DEFINE_DRM_GEM_CMA_FOPS(fops);


@@ -177,6 +177,8 @@ struct ast_private *ast_device_create(struct drm_driver *drv,
 
 #define AST_IO_MM_OFFSET		(0x380)
 
+#define AST_IO_VGAIR1_VREFRESH		BIT(3)
+
 #define __ast_read(x) \
 static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
 	u##x val = 0;\


@@ -514,6 +514,16 @@ static void ast_set_start_address_crt1(struct ast_private *ast,
 
 }
 
+static void ast_wait_for_vretrace(struct ast_private *ast)
+{
+	unsigned long timeout = jiffies + HZ;
+	u8 vgair1;
+
+	do {
+		vgair1 = ast_io_read8(ast, AST_IO_INPUT_STATUS1_READ);
+	} while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout));
+}
+
 /*
  * Primary plane
  */
@@ -562,13 +572,24 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
 	struct drm_plane_state *state = plane->state;
 	struct drm_gem_vram_object *gbo;
 	s64 gpu_addr;
+	struct drm_framebuffer *fb = state->fb;
+	struct drm_framebuffer *old_fb = old_state->fb;
+
+	if (!old_fb || (fb->format != old_fb->format)) {
+		struct drm_crtc_state *crtc_state = state->crtc->state;
+		struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
+		struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
+
+		ast_set_color_reg(ast, fb->format);
+		ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info);
+	}
 
-	gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
+	gbo = drm_gem_vram_of_gem(fb->obj[0]);
 	gpu_addr = drm_gem_vram_offset(gbo);
 	if (drm_WARN_ON_ONCE(dev, gpu_addr < 0))
 		return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
 
-	ast_set_offset_reg(ast, state->fb);
+	ast_set_offset_reg(ast, fb);
 	ast_set_start_address_crt1(ast, (u32)gpu_addr);
 
 	ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
@@ -733,6 +754,7 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
 static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
 					struct drm_crtc_state *state)
 {
+	struct drm_device *dev = crtc->dev;
 	struct ast_crtc_state *ast_state;
 	const struct drm_format_info *format;
 	bool succ;
@@ -743,8 +765,8 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
 	ast_state = to_ast_crtc_state(state);
 
 	format = ast_state->format;
-	if (!format)
-		return 0;
+	if (drm_WARN_ON_ONCE(dev, !format))
+		return -EINVAL; /* BUG: We didn't set format in primary check(). */
 
 	succ = ast_get_vbios_mode_info(format, &state->mode,
 				       &state->adjusted_mode,
@@ -755,39 +777,17 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
 	return 0;
 }
 
-static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc,
-					 struct drm_crtc_state *old_crtc_state)
-{
-	struct ast_private *ast = to_ast_private(crtc->dev);
-
-	ast_open_key(ast);
-}
-
-static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
-					 struct drm_crtc_state *old_crtc_state)
+static void
+ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+			      struct drm_crtc_state *old_crtc_state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct ast_private *ast = to_ast_private(dev);
-	struct ast_crtc_state *ast_state;
-	const struct drm_format_info *format;
-	struct ast_vbios_mode_info *vbios_mode_info;
-	struct drm_display_mode *adjusted_mode;
-
-	ast_state = to_ast_crtc_state(crtc->state);
-
-	format = ast_state->format;
-	if (!format)
-		return;
-
-	vbios_mode_info = &ast_state->vbios_mode_info;
-
-	ast_set_color_reg(ast, format);
-	ast_set_vbios_color_reg(ast, format, vbios_mode_info);
-
-	if (!crtc->state->mode_changed)
-		return;
-
-	adjusted_mode = &crtc->state->adjusted_mode;
+	struct drm_crtc_state *crtc_state = crtc->state;
+	struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
+	struct ast_vbios_mode_info *vbios_mode_info =
+		&ast_crtc_state->vbios_mode_info;
+	struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
 
 	ast_set_vbios_mode_reg(ast, adjusted_mode, vbios_mode_info);
 	ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
@@ -796,12 +796,7 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
 	ast_set_dclk_reg(ast, adjusted_mode, vbios_mode_info);
 	ast_set_crtthd_reg(ast);
 	ast_set_sync_reg(ast, adjusted_mode, vbios_mode_info);
-}
 
-static void
-ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
-			      struct drm_crtc_state *old_crtc_state)
-{
 	ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
 }
 
@@ -809,13 +804,32 @@ static void
 ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
 			       struct drm_crtc_state *old_crtc_state)
 {
+	struct drm_device *dev = crtc->dev;
+	struct ast_private *ast = to_ast_private(dev);
+
 	ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+	/*
+	 * HW cursors require the underlying primary plane and CRTC to
+	 * display a valid mode and image. This is not the case during
+	 * full modeset operations. So we temporarily disable any active
+	 * plane, including the HW cursor. Each plane's atomic_update()
+	 * helper will re-enable it if necessary.
+	 *
+	 * We only do this during *full* modesets. It does not affect
+	 * simple pageflips on the planes.
	 */
+	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
+
+	/*
+	 * Ensure that no scanout takes place before reprogramming mode
+	 * and format registers.
+	 */
+	ast_wait_for_vretrace(ast);
 }
 
 static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
 	.atomic_check = ast_crtc_helper_atomic_check,
-	.atomic_begin = ast_crtc_helper_atomic_begin,
-	.atomic_flush = ast_crtc_helper_atomic_flush,
 	.atomic_enable = ast_crtc_helper_atomic_enable,
 	.atomic_disable = ast_crtc_helper_atomic_disable,
 };
@@ -1054,6 +1068,11 @@ static int ast_connector_init(struct drm_device *dev)
 * Mode config
 */
 
+static const struct drm_mode_config_helper_funcs
+ast_mode_config_helper_funcs = {
+	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
 static const struct drm_mode_config_funcs ast_mode_config_funcs = {
 	.fb_create = drm_gem_fb_create,
 	.mode_valid = drm_vram_helper_mode_valid,
@@ -1093,6 +1112,8 @@ int ast_mode_config_init(struct ast_private *ast)
 		dev->mode_config.max_height = 1200;
 	}
 
+	dev->mode_config.helper_private = &ast_mode_config_helper_funcs;
+
 	memset(&ast->primary_plane, 0, sizeof(ast->primary_plane));
 	ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01,
 				       &ast_primary_plane_funcs,


@@ -65,6 +65,7 @@ struct ps8640 {
 	struct regulator_bulk_data supplies[2];
 	struct gpio_desc *gpio_reset;
 	struct gpio_desc *gpio_powerdown;
+	bool powered;
 };

 static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)

@@ -91,13 +92,15 @@ static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
 	return 0;
 }

-static void ps8640_pre_enable(struct drm_bridge *bridge)
+static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
 {
-	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
 	struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
 	unsigned long timeout;
 	int ret, status;

+	if (ps_bridge->powered)
+		return;
+
 	ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies),
 				    ps_bridge->supplies);
 	if (ret < 0) {

@@ -152,10 +155,6 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
 		goto err_regulators_disable;
 	}

-	ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
-	if (ret)
-		goto err_regulators_disable;
-
 	/* Switch access edp panel's edid through i2c */
 	ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
 					I2C_BYPASS_EN);

@@ -164,6 +163,8 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
 		goto err_regulators_disable;
 	}

+	ps_bridge->powered = true;
+
 	return;

 err_regulators_disable:

@@ -171,12 +172,12 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
 			       ps_bridge->supplies);
 }

-static void ps8640_post_disable(struct drm_bridge *bridge)
+static void ps8640_bridge_poweroff(struct ps8640 *ps_bridge)
 {
-	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
 	int ret;

-	ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+	if (!ps_bridge->powered)
+		return;

 	gpiod_set_value(ps_bridge->gpio_reset, 1);
 	gpiod_set_value(ps_bridge->gpio_powerdown, 1);

@@ -184,6 +185,28 @@ static void ps8640_post_disable(struct drm_bridge *bridge)
 				     ps_bridge->supplies);
 	if (ret < 0)
 		DRM_ERROR("cannot disable regulators %d\n", ret);
+
+	ps_bridge->powered = false;
+}
+
+static void ps8640_pre_enable(struct drm_bridge *bridge)
+{
+	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+	int ret;
+
+	ps8640_bridge_poweron(ps_bridge);
+
+	ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+	if (ret < 0)
+		ps8640_bridge_poweroff(ps_bridge);
+}
+
+static void ps8640_post_disable(struct drm_bridge *bridge)
+{
+	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+
+	ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+	ps8640_bridge_poweroff(ps_bridge);
 }

 static int ps8640_bridge_attach(struct drm_bridge *bridge,

@@ -249,9 +272,34 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
 					   struct drm_connector *connector)
 {
 	struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+	bool poweroff = !ps_bridge->powered;
+	struct edid *edid;
+
+	/*
+	 * When we end calling get_edid() triggered by an ioctl, i.e
+	 *
+	 *   drm_mode_getconnector (ioctl)
+	 *     -> drm_helper_probe_single_connector_modes
+	 *        -> drm_bridge_connector_get_modes
+	 *           -> ps8640_bridge_get_edid
+	 *
+	 * We need to make sure that what we need is enabled before reading
+	 * EDID, for this chip, we need to do a full poweron, otherwise it will
+	 * fail.
+	 */
+	drm_bridge_chain_pre_enable(bridge);

-	return drm_get_edid(connector,
+	edid = drm_get_edid(connector,
 			    ps_bridge->page[PAGE0_DP_CNTL]->adapter);
+
+	/*
+	 * If we call the get_edid() function without having enabled the chip
+	 * before, return the chip to its original power state.
+	 */
+	if (poweroff)
+		drm_bridge_chain_post_disable(bridge);
+
+	return edid;
 }

 static const struct drm_bridge_funcs ps8640_bridge_funcs = {
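
The poweron/poweroff split above is what makes the EDID path safe to call from any state: both functions return early based on the powered flag, so wrapping a one-off EDID read in a full bridge-chain power cycle is idempotent. A minimal sketch of that bracket in isolation; bridge_is_powered() is a hypothetical stand-in for the driver's own powered flag:

/* Sketch: power-bracketing a one-off access (assumes the hypothetical
 * bridge_is_powered() query). */
static struct edid *get_edid_powered(struct drm_bridge *bridge,
				     struct drm_connector *connector,
				     struct i2c_adapter *ddc)
{
	bool was_powered = bridge_is_powered(bridge);
	struct edid *edid;

	drm_bridge_chain_pre_enable(bridge);	/* no-op if already on */
	edid = drm_get_edid(connector, ddc);
	if (!was_powered)			/* restore original state */
		drm_bridge_chain_post_disable(bridge);

	return edid;
}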


@@ -89,7 +89,9 @@
 #define VID_MODE_TYPE_NON_BURST_SYNC_EVENTS	0x1
 #define VID_MODE_TYPE_BURST			0x2
 #define VID_MODE_TYPE_MASK			0x3
+#define ENABLE_LOW_POWER_CMD			BIT(15)
 #define VID_MODE_VPG_ENABLE			BIT(16)
+#define VID_MODE_VPG_MODE			BIT(20)
 #define VID_MODE_VPG_HORIZONTAL			BIT(24)

 #define DSI_VID_PKT_SIZE			0x3c

@@ -220,6 +222,21 @@
 #define PHY_STATUS_TIMEOUT_US		10000
 #define CMD_PKT_STATUS_TIMEOUT_US	20000

+#ifdef CONFIG_DEBUG_FS
+#define VPG_DEFS(name, dsi) \
+	((void __force *)&((*dsi).vpg_defs.name))
+
+#define REGISTER(name, mask, dsi) \
+	{ #name, VPG_DEFS(name, dsi), mask, dsi }
+
+struct debugfs_entries {
+	const char		*name;
+	bool			*reg;
+	u32			mask;
+	struct dw_mipi_dsi	*dsi;
+};
+#endif /* CONFIG_DEBUG_FS */
+
 struct dw_mipi_dsi {
 	struct drm_bridge bridge;
 	struct mipi_dsi_host dsi_host;

@@ -237,9 +254,12 @@ struct dw_mipi_dsi {

 #ifdef CONFIG_DEBUG_FS
 	struct dentry *debugfs;
+	struct debugfs_entries *debugfs_vpg;

-	bool vpg;
-	bool vpg_horizontal;
+	struct {
+		bool vpg;
+		bool vpg_horizontal;
+		bool vpg_ber_pattern;
+	} vpg_defs;
 #endif /* CONFIG_DEBUG_FS */

 	struct dw_mipi_dsi *master; /* dual-dsi master ptr */

@@ -360,13 +380,28 @@ static void dw_mipi_message_config(struct dw_mipi_dsi *dsi,
 	bool lpm = msg->flags & MIPI_DSI_MSG_USE_LPM;
 	u32 val = 0;

+	/*
+	 * TODO dw drv improvements
+	 * largest packet sizes during hfp or during vsa/vpb/vfp
+	 * should be computed according to byte lane, lane number and only
+	 * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS)
+	 */
+	dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(16)
+		  | INVACT_LPCMD_TIME(4));
+
 	if (msg->flags & MIPI_DSI_MSG_REQ_ACK)
 		val |= ACK_RQST_EN;
 	if (lpm)
 		val |= CMD_MODE_ALL_LP;

-	dsi_write(dsi, DSI_LPCLK_CTRL, lpm ? 0 : PHY_TXREQUESTCLKHS);
 	dsi_write(dsi, DSI_CMD_MODE_CFG, val);
+
+	val = dsi_read(dsi, DSI_VID_MODE_CFG);
+	if (lpm)
+		val |= ENABLE_LOW_POWER_CMD;
+	else
+		val &= ~ENABLE_LOW_POWER_CMD;
+	dsi_write(dsi, DSI_VID_MODE_CFG, val);
 }

 static int dw_mipi_dsi_gen_pkt_hdr_write(struct dw_mipi_dsi *dsi, u32 hdr_val)

@@ -529,9 +564,11 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
 		val |= VID_MODE_TYPE_NON_BURST_SYNC_EVENTS;

 #ifdef CONFIG_DEBUG_FS
-	if (dsi->vpg) {
+	if (dsi->vpg_defs.vpg) {
 		val |= VID_MODE_VPG_ENABLE;
-		val |= dsi->vpg_horizontal ? VID_MODE_VPG_HORIZONTAL : 0;
+		val |= dsi->vpg_defs.vpg_horizontal ?
+		       VID_MODE_VPG_HORIZONTAL : 0;
+		val |= dsi->vpg_defs.vpg_ber_pattern ? VID_MODE_VPG_MODE : 0;
 	}
 #endif /* CONFIG_DEBUG_FS */

@@ -541,16 +578,22 @@ static void dw_mipi_dsi_video_mode_config(struct dw_mipi_dsi *dsi)
 static void dw_mipi_dsi_set_mode(struct dw_mipi_dsi *dsi,
 				 unsigned long mode_flags)
 {
+	u32 val;
+
 	dsi_write(dsi, DSI_PWR_UP, RESET);

 	if (mode_flags & MIPI_DSI_MODE_VIDEO) {
 		dsi_write(dsi, DSI_MODE_CFG, ENABLE_VIDEO_MODE);
 		dw_mipi_dsi_video_mode_config(dsi);
-		dsi_write(dsi, DSI_LPCLK_CTRL, PHY_TXREQUESTCLKHS);
 	} else {
 		dsi_write(dsi, DSI_MODE_CFG, ENABLE_CMD_MODE);
 	}

+	val = PHY_TXREQUESTCLKHS;
+	if (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
+		val |= AUTO_CLKLANE_CTRL;
+	dsi_write(dsi, DSI_LPCLK_CTRL, val);
+
 	dsi_write(dsi, DSI_PWR_UP, POWERUP);
 }

@@ -562,15 +605,30 @@ static void dw_mipi_dsi_disable(struct dw_mipi_dsi *dsi)

 static void dw_mipi_dsi_init(struct dw_mipi_dsi *dsi)
 {
+	const struct dw_mipi_dsi_phy_ops *phy_ops = dsi->plat_data->phy_ops;
+	unsigned int esc_rate; /* in MHz */
+	u32 esc_clk_division;
+	int ret;
+
 	/*
 	 * The maximum permitted escape clock is 20MHz and it is derived from
-	 * lanebyteclk, which is running at "lane_mbps / 8". Thus we want:
-	 *
-	 *     (lane_mbps >> 3) / esc_clk_division < 20
-	 * which is:
-	 *     (lane_mbps >> 3) / 20 > esc_clk_division
+	 * lanebyteclk, which is running at "lane_mbps / 8".
 	 */
-	u32 esc_clk_division = (dsi->lane_mbps >> 3) / 20 + 1;
+	if (phy_ops->get_esc_clk_rate) {
+		ret = phy_ops->get_esc_clk_rate(dsi->plat_data->priv_data,
+						&esc_rate);
+		if (ret)
+			DRM_DEBUG_DRIVER("Phy get_esc_clk_rate() failed\n");
+	} else
+		esc_rate = 20; /* Default to 20MHz */
+
+	/*
+	 * We want :
+	 *     (lane_mbps >> 3) / esc_clk_division < X
+	 * which is:
+	 *     (lane_mbps >> 3) / X > esc_clk_division
+	 */
+	esc_clk_division = (dsi->lane_mbps >> 3) / esc_rate + 1;

 	dsi_write(dsi, DSI_PWR_UP, RESET);

@@ -611,14 +669,6 @@ static void dw_mipi_dsi_dpi_config(struct dw_mipi_dsi *dsi,
 	dsi_write(dsi, DSI_DPI_VCID, DPI_VCID(dsi->channel));
 	dsi_write(dsi, DSI_DPI_COLOR_CODING, color);
 	dsi_write(dsi, DSI_DPI_CFG_POL, val);
-	/*
-	 * TODO dw drv improvements
-	 * largest packet sizes during hfp or during vsa/vpb/vfp
-	 * should be computed according to byte lane, lane number and only
-	 * if sending lp cmds in high speed is enable (PHY_TXREQUESTCLKHS)
-	 */
-	dsi_write(dsi, DSI_DPI_LP_CMD_TIM, OUTVACT_LPCMD_TIME(4)
-		  | INVACT_LPCMD_TIME(4));
 }

 static void dw_mipi_dsi_packet_handler_config(struct dw_mipi_dsi *dsi)

@@ -964,6 +1014,66 @@ static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {

 #ifdef CONFIG_DEBUG_FS

+static int dw_mipi_dsi_debugfs_write(void *data, u64 val)
+{
+	struct debugfs_entries *vpg = data;
+	struct dw_mipi_dsi *dsi;
+	u32 mode_cfg;
+
+	if (!vpg)
+		return -ENODEV;
+
+	dsi = vpg->dsi;
+
+	*vpg->reg = (bool)val;
+
+	mode_cfg = dsi_read(dsi, DSI_VID_MODE_CFG);
+
+	if (*vpg->reg)
+		mode_cfg |= vpg->mask;
+	else
+		mode_cfg &= ~vpg->mask;
+
+	dsi_write(dsi, DSI_VID_MODE_CFG, mode_cfg);
+
+	return 0;
+}
+
+static int dw_mipi_dsi_debugfs_show(void *data, u64 *val)
+{
+	struct debugfs_entries *vpg = data;
+
+	if (!vpg)
+		return -ENODEV;
+
+	*val = *vpg->reg;
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_mipi_dsi_debugfs_show,
+			 dw_mipi_dsi_debugfs_write, "%llu\n");
+
+static void debugfs_create_files(void *data)
+{
+	struct dw_mipi_dsi *dsi = data;
+	struct debugfs_entries debugfs[] = {
+		REGISTER(vpg, VID_MODE_VPG_ENABLE, dsi),
+		REGISTER(vpg_horizontal, VID_MODE_VPG_HORIZONTAL, dsi),
+		REGISTER(vpg_ber_pattern, VID_MODE_VPG_MODE, dsi),
+	};
+	int i;
+
+	dsi->debugfs_vpg = kmemdup(debugfs, sizeof(debugfs), GFP_KERNEL);
+	if (!dsi->debugfs_vpg)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(debugfs); i++)
+		debugfs_create_file(dsi->debugfs_vpg[i].name, 0644,
+				    dsi->debugfs, &dsi->debugfs_vpg[i],
+				    &fops_x32);
+}
+
 static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi)
 {
 	dsi->debugfs = debugfs_create_dir(dev_name(dsi->dev), NULL);

@@ -972,14 +1082,13 @@ static void dw_mipi_dsi_debugfs_init(struct dw_mipi_dsi *dsi)
 		return;
 	}

-	debugfs_create_bool("vpg", 0660, dsi->debugfs, &dsi->vpg);
-	debugfs_create_bool("vpg_horizontal", 0660, dsi->debugfs,
-			    &dsi->vpg_horizontal);
+	debugfs_create_files(dsi);
 }

 static void dw_mipi_dsi_debugfs_remove(struct dw_mipi_dsi *dsi)
 {
 	debugfs_remove_recursive(dsi->debugfs);
+	kfree(dsi->debugfs_vpg);
 }

 #else
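
The debugfs rework above swaps plain debugfs_create_bool() files for a DEFINE_DEBUGFS_ATTRIBUTE()-backed table so that writing a file also patches DSI_VID_MODE_CFG immediately instead of waiting for the next mode set. A stripped-down sketch of that attribute pattern; my_dev, mdev->flag and my_apply() are hypothetical:

#include <linux/debugfs.h>

/* One get/set pair backs each file; the setter stores the bit and pokes
 * the hardware right away, as the patch does for the VPG bits. */
static int my_show(void *data, u64 *val)
{
	struct my_dev *mdev = data;

	*val = mdev->flag;
	return 0;
}

static int my_store(void *data, u64 val)
{
	struct my_dev *mdev = data;

	mdev->flag = !!val;
	my_apply(mdev);		/* e.g. read-modify-write a config register */
	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(my_fops, my_show, my_store, "%llu\n");

/* Registration, typically from the driver's debugfs init: */
debugfs_create_file("flag", 0644, parent_dentry, mdev, &my_fops);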


@@ -485,7 +485,7 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
 		val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_6);
 	} else {
 		val |= TC358775_LVCFG_PCLKDIV(DIVIDE_BY_3);
-	};
+	}

 	d2l_write(tc->i2c, LVCFG, val);
 }


@@ -1115,9 +1115,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
  * @old_state: atomic state object with old state structures
  *
  * This function updates all the various legacy modeset state pointers in
- * connectors, encoders and CRTCs. It also updates the timestamping constants
- * used for precise vblank timestamps by calling
- * drm_calc_timestamping_constants().
+ * connectors, encoders and CRTCs.
  *
  * Drivers can use this for building their own atomic commit if they don't have
  * a pure helper-based modeset implementation.

@@ -1186,13 +1184,30 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev,
 			crtc->x = new_plane_state->src_x >> 16;
 			crtc->y = new_plane_state->src_y >> 16;
 		}
+	}
+}
+EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
+
+/**
+ * drm_atomic_helper_calc_timestamping_constants - update vblank timestamping constants
+ * @state: atomic state object
+ *
+ * Updates the timestamping constants used for precise vblank timestamps
+ * by calling drm_calc_timestamping_constants() for all enabled crtcs in @state.
+ */
+void drm_atomic_helper_calc_timestamping_constants(struct drm_atomic_state *state)
+{
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_crtc *crtc;
+	int i;

+	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
 		if (new_crtc_state->enable)
 			drm_calc_timestamping_constants(crtc,
 							&new_crtc_state->adjusted_mode);
 	}
 }
-EXPORT_SYMBOL(drm_atomic_helper_update_legacy_modeset_state);
+EXPORT_SYMBOL(drm_atomic_helper_calc_timestamping_constants);

 static void
 crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)

@@ -1276,6 +1291,7 @@ void drm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
 	disable_outputs(dev, old_state);

 	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
+	drm_atomic_helper_calc_timestamping_constants(old_state);

 	crtc_set_mode(dev, old_state);
 }
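
After this split, drm_atomic_helper_update_legacy_modeset_state() no longer updates vblank timestamping, so a driver with a hand-rolled commit path that calls it directly must now pair it with the new helper, exactly as drm_atomic_helper_commit_modeset_disables() does above. In sketch form:

/* Sketch: both calls are now needed where the first used to cover both. */
static void my_commit_modeset_updates(struct drm_device *dev,
				      struct drm_atomic_state *old_state)
{
	/* legacy pointers in connectors, encoders and CRTCs */
	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
	/* vblank timestamping constants, formerly done by the call above */
	drm_atomic_helper_calc_timestamping_constants(old_state);
}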


@@ -2289,7 +2289,7 @@ static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)

 static bool
 drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
-			     const struct list_head *export_list,
+			     const struct list_head *modes,
 			     const struct drm_file *file_priv)
 {
 	/*

@@ -2305,15 +2305,17 @@ drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
 	 * while preparing the list of user-modes.
 	 */
 	if (!file_priv->aspect_ratio_allowed) {
-		struct drm_display_mode *mode_itr;
+		const struct drm_display_mode *mode_itr;

-		list_for_each_entry(mode_itr, export_list, export_head)
-			if (drm_mode_match(mode_itr, mode,
+		list_for_each_entry(mode_itr, modes, head) {
+			if (mode_itr->expose_to_userspace &&
+			    drm_mode_match(mode_itr, mode,
 					   DRM_MODE_MATCH_TIMINGS |
 					   DRM_MODE_MATCH_CLOCK |
 					   DRM_MODE_MATCH_FLAGS |
 					   DRM_MODE_MATCH_3D_FLAGS))
 				return false;
+		}
 	}

 	return true;

@@ -2333,7 +2335,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	struct drm_mode_modeinfo u_mode;
 	struct drm_mode_modeinfo __user *mode_ptr;
 	uint32_t __user *encoder_ptr;
-	LIST_HEAD(export_list);

 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EOPNOTSUPP;

@@ -2377,25 +2378,30 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 	out_resp->connection = connector->status;

 	/* delayed so we get modes regardless of pre-fill_modes state */
-	list_for_each_entry(mode, &connector->modes, head)
-		if (drm_mode_expose_to_userspace(mode, &export_list,
+	list_for_each_entry(mode, &connector->modes, head) {
+		WARN_ON(mode->expose_to_userspace);
+
+		if (drm_mode_expose_to_userspace(mode, &connector->modes,
 						 file_priv)) {
-			list_add_tail(&mode->export_head, &export_list);
+			mode->expose_to_userspace = true;
 			mode_count++;
 		}
+	}

 	/*
 	 * This ioctl is called twice, once to determine how much space is
 	 * needed, and the 2nd time to fill it.
-	 * The modes that need to be exposed to the user are maintained in the
-	 * 'export_list'. When the ioctl is called first time to determine the,
-	 * space, the export_list gets filled, to find the no.of modes. In the
-	 * 2nd time, the user modes are filled, one by one from the export_list.
 	 */
 	if ((out_resp->count_modes >= mode_count) && mode_count) {
 		copied = 0;
 		mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
-		list_for_each_entry(mode, &export_list, export_head) {
+		list_for_each_entry(mode, &connector->modes, head) {
+			if (!mode->expose_to_userspace)
+				continue;
+
+			/* Clear the tag for the next time around */
+			mode->expose_to_userspace = false;
+
 			drm_mode_convert_to_umode(&u_mode, mode);
 			/*
 			 * Reset aspect ratio flags of user-mode, if modes with

@@ -2406,13 +2412,26 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
 			if (copy_to_user(mode_ptr + copied,
 					 &u_mode, sizeof(u_mode))) {
 				ret = -EFAULT;
+
+				/*
+				 * Clear the tag for the rest of
+				 * the modes for the next time around.
+				 */
+				list_for_each_entry_continue(mode, &connector->modes, head)
+					mode->expose_to_userspace = false;
+
 				mutex_unlock(&dev->mode_config.mutex);

 				goto out;
 			}
 			copied++;
 		}
+	} else {
+		/* Clear the tag for the next time around */
+		list_for_each_entry(mode, &connector->modes, head)
+			mode->expose_to_userspace = false;
 	}

 	out_resp->count_modes = mode_count;
 	mutex_unlock(&dev->mode_config.mutex);
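
The export_list removal works because modes are now tagged in place: the first ioctl pass marks and counts, the second pass copies and clears the tags, and every exit path clears leftover tags so the next call starts clean. Reduced to its essentials (should_expose() and copy_out() are hypothetical stand-ins):

/* Sketch of the in-place tagging that replaces the separate export list. */
count = 0;
list_for_each_entry(mode, &connector->modes, head) {
	if (should_expose(mode)) {
		mode->expose_to_userspace = true;
		count++;
	}
}

list_for_each_entry(mode, &connector->modes, head) {
	if (!mode->expose_to_userspace)
		continue;
	mode->expose_to_userspace = false;	/* clear for the next call */
	copy_out(mode);
}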


@@ -144,8 +144,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
 	source[len - 1] = '\0';

 	ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
-	if (ret)
+	if (ret) {
+		kfree(source);
 		return ret;
+	}

 	spin_lock_irq(&crc->lock);


@@ -1039,6 +1039,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
 		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
 	case DP_REMOTE_I2C_READ:
 		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
+	case DP_REMOTE_I2C_WRITE:
+		return true; /* since there's nothing to parse */
 	case DP_ENUM_PATH_RESOURCES:
 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
 	case DP_ALLOCATE_PAYLOAD:

@@ -5499,29 +5501,29 @@ static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
 		msgs[num - 1].len <= 0xff;
 }

-/* I2C device */
-static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
-			       int num)
+static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
+{
+	int i;
+
+	for (i = 0; i < num - 1; i++) {
+		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
+		    msgs[i].len > 0xff)
+			return false;
+	}
+
+	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
+}
+
+static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
+			       struct drm_dp_mst_port *port,
+			       struct i2c_msg *msgs, int num)
 {
-	struct drm_dp_aux *aux = adapter->algo_data;
-	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
-	struct drm_dp_mst_branch *mstb;
 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 	unsigned int i;
 	struct drm_dp_sideband_msg_req_body msg;
 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
 	int ret;

-	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
-	if (!mstb)
-		return -EREMOTEIO;
-
-	if (!remote_i2c_read_ok(msgs, num)) {
-		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
-		ret = -EIO;
-		goto out;
-	}
-
 	memset(&msg, 0, sizeof(msg));
 	msg.req_type = DP_REMOTE_I2C_READ;
 	msg.u.i2c_read.num_transactions = num - 1;

@@ -5562,6 +5564,78 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 	}
 out:
 	kfree(txmsg);
+	return ret;
+}
+
+static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
+				struct drm_dp_mst_port *port,
+				struct i2c_msg *msgs, int num)
+{
+	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+	unsigned int i;
+	struct drm_dp_sideband_msg_req_body msg;
+	struct drm_dp_sideband_msg_tx *txmsg = NULL;
+	int ret;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	for (i = 0; i < num; i++) {
+		memset(&msg, 0, sizeof(msg));
+		msg.req_type = DP_REMOTE_I2C_WRITE;
+		msg.u.i2c_write.port_number = port->port_num;
+		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
+		msg.u.i2c_write.num_bytes = msgs[i].len;
+		msg.u.i2c_write.bytes = msgs[i].buf;
+
+		memset(txmsg, 0, sizeof(*txmsg));
+		txmsg->dst = mstb;
+
+		drm_dp_encode_sideband_req(&msg, txmsg);
+		drm_dp_queue_down_tx(mgr, txmsg);
+
+		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+		if (ret > 0) {
+			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
+				ret = -EREMOTEIO;
+				goto out;
+			}
+		} else {
+			goto out;
+		}
+	}
+	ret = num;
+out:
+	kfree(txmsg);
+	return ret;
+}
+
+/* I2C device */
+static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
+			       struct i2c_msg *msgs, int num)
+{
+	struct drm_dp_aux *aux = adapter->algo_data;
+	struct drm_dp_mst_port *port =
+		container_of(aux, struct drm_dp_mst_port, aux);
+	struct drm_dp_mst_branch *mstb;
+	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+	int ret;
+
+	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
+	if (!mstb)
+		return -EREMOTEIO;
+
+	if (remote_i2c_read_ok(msgs, num)) {
+		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
+	} else if (remote_i2c_write_ok(msgs, num)) {
+		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
+	} else {
+		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+		ret = -EIO;
+	}
+
 	drm_dp_mst_topology_put_mstb(mstb);
 	return ret;
 }
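
remote_i2c_write_ok() mirrors the existing read-side check: every message except the last must be a write with I2C_M_STOP set and at most 0xff bytes, and the final message must also be a write of at most 0xff bytes. A transfer shaped like this, for example, is now routed down the new write path (adapter and addresses are hypothetical):

/* Sketch: an I2C transaction that passes remote_i2c_write_ok(). */
u8 reg = 0x10;
u8 payload[2] = { 0xde, 0xad };
struct i2c_msg msgs[] = {
	{ .addr = 0x50, .flags = I2C_M_STOP, .len = 1, .buf = &reg },
	{ .addr = 0x50, .flags = 0, .len = 2, .buf = payload },
};

ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));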


@@ -240,13 +240,13 @@ void drm_minor_release(struct drm_minor *minor)
  * DOC: driver instance overview
  *
  * A device instance for a drm driver is represented by &struct drm_device. This
- * is initialized with drm_dev_init(), usually from bus-specific ->probe()
- * callbacks implemented by the driver. The driver then needs to initialize all
- * the various subsystems for the drm device like memory management, vblank
- * handling, modesetting support and intial output configuration plus obviously
- * initialize all the corresponding hardware bits. Finally when everything is up
- * and running and ready for userspace the device instance can be published
- * using drm_dev_register().
+ * is allocated and initialized with devm_drm_dev_alloc(), usually from
+ * bus-specific ->probe() callbacks implemented by the driver. The driver then
+ * needs to initialize all the various subsystems for the drm device like memory
+ * management, vblank handling, modesetting support and initial output
+ * configuration plus obviously initialize all the corresponding hardware bits.
+ * Finally when everything is up and running and ready for userspace the device
+ * instance can be published using drm_dev_register().
  *
  * There is also deprecated support for initalizing device instances using
  * bus-specific helpers and the &drm_driver.load callback. But due to

@@ -274,7 +274,7 @@ void drm_minor_release(struct drm_minor *minor)
  *
  * The following example shows a typical structure of a DRM display driver.
  * The example focus on the probe() function and the other functions that is
- * almost always present and serves as a demonstration of devm_drm_dev_init().
+ * almost always present and serves as a demonstration of devm_drm_dev_alloc().
  *
  * .. code-block:: c
  *

@@ -294,22 +294,12 @@ void drm_minor_release(struct drm_minor *minor)
  *		struct drm_device *drm;
  *		int ret;
  *
- *		// devm_kzalloc() can't be used here because the drm_device '
- *		// lifetime can exceed the device lifetime if driver unbind
- *		// happens when userspace still has open file descriptors.
- *		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- *		if (!priv)
- *			return -ENOMEM;
+ *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
+ *					  struct driver_device, drm);
+ *		if (IS_ERR(priv))
+ *			return PTR_ERR(priv);
  *
  *		drm = &priv->drm;
  *
- *		ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
- *		if (ret) {
- *			kfree(priv);
- *			return ret;
- *		}
- *		drmm_add_final_kfree(drm, priv);
- *
  *		ret = drmm_mode_config_init(drm);
  *		if (ret)
  *			return ret;

@@ -550,9 +540,9 @@ static void drm_fs_inode_free(struct inode *inode)
  * following guidelines apply:
  *
  * - The entire device initialization procedure should be run from the
- *   &component_master_ops.master_bind callback, starting with drm_dev_init(),
- *   then binding all components with component_bind_all() and finishing with
- *   drm_dev_register().
+ *   &component_master_ops.master_bind callback, starting with
+ *   devm_drm_dev_alloc(), then binding all components with
+ *   component_bind_all() and finishing with drm_dev_register().
  *
  * - The opaque pointer passed to all components through component_bind_all()
  *   should point at &struct drm_device of the device instance, not some driver

@@ -583,43 +573,9 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
 	drm_legacy_destroy_members(dev);
 }

-/**
- * drm_dev_init - Initialise new DRM device
- * @dev: DRM device
- * @driver: DRM driver
- * @parent: Parent device object
- *
- * Initialize a new DRM device. No device registration is done.
- * Call drm_dev_register() to advertice the device to user space and register it
- * with other core subsystems. This should be done last in the device
- * initialization sequence to make sure userspace can't access an inconsistent
- * state.
- *
- * The initial ref-count of the object is 1. Use drm_dev_get() and
- * drm_dev_put() to take and drop further ref-counts.
- *
- * It is recommended that drivers embed &struct drm_device into their own device
- * structure.
- *
- * Drivers that do not want to allocate their own device struct
- * embedding &struct drm_device can call drm_dev_alloc() instead. For drivers
- * that do embed &struct drm_device it must be placed first in the overall
- * structure, and the overall structure must be allocated using kmalloc(): The
- * drm core's release function unconditionally calls kfree() on the @dev pointer
- * when the final reference is released. To override this behaviour, and so
- * allow embedding of the drm_device inside the driver's device struct at an
- * arbitrary offset, you must supply a &drm_driver.release callback and control
- * the finalization explicitly.
- *
- * Note that drivers must call drmm_add_final_kfree() after this function has
- * completed successfully.
- *
- * RETURNS:
- * 0 on success, or error code on failure.
- */
-int drm_dev_init(struct drm_device *dev,
-		 struct drm_driver *driver,
-		 struct device *parent)
+static int drm_dev_init(struct drm_device *dev,
+			struct drm_driver *driver,
+			struct device *parent)
 {
 	int ret;

@@ -699,31 +655,15 @@ int drm_dev_init(struct drm_device *dev,
 	return ret;
 }
-EXPORT_SYMBOL(drm_dev_init);

 static void devm_drm_dev_init_release(void *data)
 {
 	drm_dev_put(data);
 }

-/**
- * devm_drm_dev_init - Resource managed drm_dev_init()
- * @parent: Parent device object
- * @dev: DRM device
- * @driver: DRM driver
- *
- * Managed drm_dev_init(). The DRM device initialized with this function is
- * automatically put on driver detach using drm_dev_put().
- *
- * Note that drivers must call drmm_add_final_kfree() after this function has
- * completed successfully.
- *
- * RETURNS:
- * 0 on success, or error code on failure.
- */
-int devm_drm_dev_init(struct device *parent,
-		      struct drm_device *dev,
-		      struct drm_driver *driver)
+static int devm_drm_dev_init(struct device *parent,
+			     struct drm_device *dev,
+			     struct drm_driver *driver)
 {
 	int ret;

@@ -737,7 +677,6 @@ int devm_drm_dev_init(struct device *parent,
 	return ret;
 }
-EXPORT_SYMBOL(devm_drm_dev_init);

 void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
 			   size_t size, size_t offset)

@@ -767,19 +706,9 @@ EXPORT_SYMBOL(__devm_drm_dev_alloc);
  * @driver: DRM driver to allocate device for
  * @parent: Parent device object
  *
- * Allocate and initialize a new DRM device. No device registration is done.
- * Call drm_dev_register() to advertice the device to user space and register it
- * with other core subsystems. This should be done last in the device
- * initialization sequence to make sure userspace can't access an inconsistent
- * state.
- *
- * The initial ref-count of the object is 1. Use drm_dev_get() and
- * drm_dev_put() to take and drop further ref-counts.
- *
- * Note that for purely virtual devices @parent can be NULL.
- *
- * Drivers that wish to subclass or embed &struct drm_device into their
- * own struct should look at using drm_dev_init() instead.
+ * This is the deprecated version of devm_drm_dev_alloc(), which does not support
+ * subclassing through embedding the struct &drm_device in a driver private
+ * structure, and which does not support automatic cleanup through devres.
  *
  * RETURNS:
  * Pointer to new DRM device, or ERR_PTR on failure.


@@ -176,8 +176,7 @@ static int framebuffer_check(struct drm_device *dev,
 	int i;

 	/* check if the format is supported at all */
-	info = __drm_format_info(r->pixel_format);
-	if (!info) {
+	if (!__drm_format_info(r->pixel_format)) {
 		struct drm_format_name_buf format_name;

 		DRM_DEBUG_KMS("bad framebuffer format %s\n",

@@ -186,9 +185,6 @@ static int framebuffer_check(struct drm_device *dev,
 		return -EINVAL;
 	}

-	/* now let the driver pick its own format info */
-	info = drm_get_format_info(dev, r);
-
 	if (r->width == 0) {
 		DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
 		return -EINVAL;

@@ -199,6 +195,9 @@ static int framebuffer_check(struct drm_device *dev,
 		return -EINVAL;
 	}

+	/* now let the driver pick its own format info */
+	info = drm_get_format_info(dev, r);
+
 	for (i = 0; i < info->num_planes; i++) {
 		unsigned int width = fb_plane_width(r->width, info, i);
 		unsigned int height = fb_plane_height(r->height, info, i);


@@ -655,7 +655,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
 	WARN_ON(shmem->base.import_attach);

-	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);


@@ -43,12 +43,9 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
 	drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
 	drm_printf(p, "\n");

-	if (bo->mem.bus.is_iomem) {
-		drm_printf_indent(p, indent, "bus.base=%lx\n",
-				  (unsigned long)bo->mem.bus.base);
+	if (bo->mem.bus.is_iomem)
 		drm_printf_indent(p, indent, "bus.offset=%lx\n",
 				  (unsigned long)bo->mem.bus.offset);
-	}
 }
 EXPORT_SYMBOL(drm_gem_ttm_print_info);


@@ -97,8 +97,8 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
  * hardware's draing engine.
  *
  * To access a buffer object's memory from the DRM driver, call
- * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
- * space and returns the memory address. Use drm_gem_vram_kunmap() to
+ * drm_gem_vram_vmap(). It maps the buffer into kernel address
+ * space and returns the memory address. Use drm_gem_vram_vunmap() to
  * release the mapping.
  */

@@ -135,28 +135,28 @@ static void ttm_buffer_object_destroy(struct ttm_buffer_object *bo)
 static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
 				   unsigned long pl_flag)
 {
+	u32 invariant_flags = 0;
 	unsigned int i;
 	unsigned int c = 0;
-	u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN;
+
+	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
+		invariant_flags = TTM_PL_FLAG_TOPDOWN;

 	gbo->placement.placement = gbo->placements;
 	gbo->placement.busy_placement = gbo->placements;

-	if (pl_flag & TTM_PL_FLAG_VRAM)
+	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
+		gbo->placements[c].mem_type = TTM_PL_VRAM;
 		gbo->placements[c++].flags = TTM_PL_FLAG_WC |
 					     TTM_PL_FLAG_UNCACHED |
-					     TTM_PL_FLAG_VRAM |
 					     invariant_flags;
+	}

-	if (pl_flag & TTM_PL_FLAG_SYSTEM)
+	if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
+		gbo->placements[c].mem_type = TTM_PL_SYSTEM;
 		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
-					     TTM_PL_FLAG_SYSTEM |
-					     invariant_flags;
-
-	if (!c)
-		gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
-					     TTM_PL_FLAG_SYSTEM |
 					     invariant_flags;
+	}

 	gbo->placement.num_placement = c;
 	gbo->placement.num_busy_placement = c;

@@ -167,6 +167,10 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
 	}
 }

+/*
+ * Note that on error, drm_gem_vram_init will free the buffer object.
+ */
+
 static int drm_gem_vram_init(struct drm_device *dev,
 			     struct drm_gem_vram_object *gbo,
 			     size_t size, unsigned long pg_align)

@@ -176,32 +180,37 @@ static int drm_gem_vram_init(struct drm_device *dev,
 	int ret;
 	size_t acc_size;

-	if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+	if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
+		kfree(gbo);
 		return -EINVAL;
+	}

 	bdev = &vmm->bdev;

 	gbo->bo.base.funcs = &drm_gem_vram_object_funcs;

 	ret = drm_gem_object_init(dev, &gbo->bo.base, size);
-	if (ret)
+	if (ret) {
+		kfree(gbo);
 		return ret;
+	}

 	acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));

 	gbo->bo.bdev = bdev;
-	drm_gem_vram_placement(gbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+			       DRM_GEM_VRAM_PL_FLAG_SYSTEM);

 	ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
 			  &gbo->placement, pg_align, false, acc_size,
 			  NULL, NULL, ttm_buffer_object_destroy);
 	if (ret)
-		goto err_drm_gem_object_release;
+		/*
+		 * A failing ttm_bo_init will call ttm_buffer_object_destroy
+		 * to release gbo->bo.base and kfree gbo.
+		 */
+		return ret;

 	return 0;
-
-err_drm_gem_object_release:
-	drm_gem_object_release(&gbo->bo.base);
-	return ret;
 }

@@ -235,13 +244,9 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
 	ret = drm_gem_vram_init(dev, gbo, size, pg_align);
 	if (ret < 0)
-		goto err_kfree;
+		return ERR_PTR(ret);

 	return gbo;
-
-err_kfree:
-	kfree(gbo);
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(drm_gem_vram_create);

@@ -436,39 +441,6 @@ static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
 	return kmap->virtual;
 }

-/**
- * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space
- * @gbo: the GEM VRAM object
- * @map: establish a mapping if necessary
- * @is_iomem: returns true if the mapped memory is I/O memory, or false \
-	otherwise; can be NULL
- *
- * This function maps the buffer object into the kernel's address space
- * or returns the current mapping. If the parameter map is false, the
- * function only queries the current mapping, but does not establish a
- * new one.
- *
- * Returns:
- * The buffers virtual address if mapped, or
- * NULL if not mapped, or
- * an ERR_PTR()-encoded error code otherwise.
- */
-void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
-			bool *is_iomem)
-{
-	int ret;
-	void *virtual;
-
-	ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
-	if (ret)
-		return ERR_PTR(ret);
-	virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem);
-	ttm_bo_unreserve(&gbo->bo);
-	return virtual;
-}
-EXPORT_SYMBOL(drm_gem_vram_kmap);
-
 static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
 {
 	if (WARN_ON_ONCE(!gbo->kmap_use_count))

@@ -484,22 +456,6 @@ static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo)
 	 */
 }

-/**
- * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object
- * @gbo: the GEM VRAM object
- */
-void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo)
-{
-	int ret;
-
-	ret = ttm_bo_reserve(&gbo->bo, false, false, NULL);
-	if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret))
-		return;
-	drm_gem_vram_kunmap_locked(gbo);
-	ttm_bo_unreserve(&gbo->bo);
-}
-EXPORT_SYMBOL(drm_gem_vram_kunmap);
-
 /**
  * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
  * space

@@ -511,9 +467,6 @@ EXPORT_SYMBOL(drm_gem_vram_kunmap);
  * permanently. Call drm_gem_vram_vunmap() with the returned address to
  * unmap and unpin the GEM VRAM object.
  *
- * If you have special requirements for the pinning or mapping operations,
- * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly.
- *
  * Returns:
  * The buffer's virtual address on success, or
  * an ERR_PTR()-encoded error code otherwise.

@@ -647,7 +600,7 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
 static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
 					       struct ttm_placement *pl)
 {
-	drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM);
+	drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
 	*pl = gbo->placement;
 }

@@ -967,16 +920,13 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
  * TTM TT
  */

-static void backend_func_destroy(struct ttm_tt *tt)
+static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
 {
+	ttm_tt_destroy_common(bdev, tt);
 	ttm_tt_fini(tt);
 	kfree(tt);
 }

-static struct ttm_backend_func backend_func = {
-	.destroy = backend_func_destroy
-};
-
 /*
  * TTM BO device
  */

@@ -991,8 +941,6 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
 	if (!tt)
 		return NULL;

-	tt->func = &backend_func;
-
 	ret = ttm_tt_init(tt, bo, page_flags);
 	if (ret < 0)
 		goto err_ttm_tt_init;

@@ -1042,8 +990,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
 	case TTM_PL_SYSTEM:	/* nothing to do */
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->start << PAGE_SHIFT;
-		mem->bus.base = vmm->vram_base;
+		mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
 		mem->bus.is_iomem = true;
 		break;
 	default:

@@ -1055,6 +1002,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
 static struct ttm_bo_driver bo_driver = {
 	.ttm_tt_create = bo_driver_ttm_tt_create,
+	.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = bo_driver_evict_flags,
 	.move_notify = bo_driver_move_notify,

@@ -1110,9 +1058,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 		return ret;

 	ret = ttm_range_man_init(&vmm->bdev, TTM_PL_VRAM,
-				 TTM_PL_FLAG_UNCACHED |
-				 TTM_PL_FLAG_WC, false,
-				 vram_size >> PAGE_SHIFT);
+				 false, vram_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
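
With the TTM flags gone from the interface, callers select placements exclusively through the helper's own DRM_GEM_VRAM_PL_FLAG_* values. Typical pinning for scanout now reads:

/* Sketch: pinning a VRAM BO with the helper-private placement flags. */
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
if (ret)
	return ret;

/* ... program the scanout address ... */

drm_gem_vram_unpin(gbo);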


@@ -95,6 +95,7 @@ void drm_minor_release(struct drm_minor *minor);

 /* drm_managed.c */
 void drm_managed_release(struct drm_device *dev);
+void drmm_add_final_kfree(struct drm_device *dev, void *container);

 /* drm_vblank.c */
 static inline bool drm_vblank_passed(u64 seq, u64 ref)


@@ -27,7 +27,7 @@
  * be done directly with drmm_kmalloc() and the related functions. Everything
  * will be released on the final drm_dev_put() in reverse order of how the
  * release actions have been added and memory has been allocated since driver
- * loading started with drm_dev_init().
+ * loading started with devm_drm_dev_alloc().
  *
  * Note that release actions and managed memory can also be added and removed
  * during the lifetime of the driver, all the functions are fully concurrent

@@ -125,18 +125,6 @@ static void add_dr(struct drm_device *dev, struct drmres *dr)
 		  dr, dr->node.name, (unsigned long) dr->node.size);
 }

-/**
- * drmm_add_final_kfree - add release action for the final kfree()
- * @dev: DRM device
- * @container: pointer to the kmalloc allocation containing @dev
- *
- * Since the allocation containing the struct &drm_device must be allocated
- * before it can be initialized with drm_dev_init() there's no way to allocate
- * that memory with drmm_kmalloc(). To side-step this chicken-egg problem the
- * pointer for this final kfree() must be specified by calling this function. It
- * will be released in the final drm_dev_put() for @dev, after all other release
- * actions installed through drmm_add_action() have been processed.
- */
 void drmm_add_final_kfree(struct drm_device *dev, void *container)
 {
 	WARN_ON(dev->managed.final_kfree);

@@ -144,7 +132,6 @@ void drmm_add_final_kfree(struct drm_device *dev, void *container)
 	WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
 	dev->managed.final_kfree = container;
 }
-EXPORT_SYMBOL(drmm_add_final_kfree);

 int __drmm_add_action(struct drm_device *dev,
 		      drmres_release_t action,
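
With drmm_add_final_kfree() now internal, the final kfree() is wholly owned by devm_drm_dev_alloc(); drivers hook their own teardown in through managed release actions instead. A short sketch, with hypothetical my_priv/my_hw_fini() names:

/* Runs on the final drm_dev_put(), in reverse order of registration. */
static void my_hw_fini(struct drm_device *drm, void *data)
{
	struct my_priv *priv = data;

	my_hw_disable(priv);	/* hypothetical teardown step */
}

ret = drmm_add_action_or_reset(drm, my_hw_fini, priv);
if (ret)
	return ret;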


@@ -803,9 +803,11 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
  *
  * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
  */
-struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
+struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
+				       struct page **pages, unsigned int nr_pages)
 {
 	struct sg_table *sg = NULL;
+	size_t max_segment = 0;
 	int ret;

 	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);

@@ -814,8 +816,13 @@ struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
 		goto out;
 	}

-	ret = sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
-					nr_pages << PAGE_SHIFT, GFP_KERNEL);
+	if (dev)
+		max_segment = dma_max_mapping_size(dev->dev);
+	if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
+		max_segment = SCATTERLIST_MAX_SEGMENT;
+	ret = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
+					  nr_pages << PAGE_SHIFT,
+					  max_segment, GFP_KERNEL);
 	if (ret)
 		goto out;
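
The new device argument lets the helper clamp scatterlist segment sizes to what the device's DMA engine can address: dma_max_mapping_size() when a device is given, capped at SCATTERLIST_MAX_SEGMENT. Callers simply forward their DRM device, as the etnaviv conversions below do:

/* Sketch: caller side after the signature change. */
struct sg_table *sgt;

sgt = drm_prime_pages_to_sg(obj->dev, pages, npages);
if (IS_ERR(sgt))
	return PTR_ERR(sgt);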


@@ -674,7 +674,7 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
  *
  * Note that atomic drivers must call drm_calc_timestamping_constants() before
  * enabling a CRTC. The atomic helpers already take care of that in
- * drm_atomic_helper_update_legacy_modeset_state().
+ * drm_atomic_helper_calc_timestamping_constants().
  *
  * Returns:
  *

@@ -819,7 +819,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
  *
  * Note that atomic drivers must call drm_calc_timestamping_constants() before
  * enabling a CRTC. The atomic helpers already take care of that in
- * drm_atomic_helper_update_legacy_modeset_state().
+ * drm_atomic_helper_calc_timestamping_constants().
  *
  * Returns:
  *


@@ -103,7 +103,8 @@ struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 		int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 		struct sg_table *sgt;

-		sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+		sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
+					    etnaviv_obj->pages, npages);
 		if (IS_ERR(sgt)) {
 			dev_err(dev->dev, "failed to allocate sgt: %ld\n",
 				PTR_ERR(sgt));


@@ -19,7 +19,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
 	if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
 		return ERR_PTR(-EINVAL);

-	return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+	return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);
 }

 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)


@@ -164,7 +164,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
 	return 0;
 }

-static struct fb_ops psbfb_ops = {
+static const struct fb_ops psbfb_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_setcolreg = psbfb_setcolreg,

@@ -175,7 +175,7 @@ static struct fb_ops psbfb_ops = {
 	.fb_sync = psbfb_sync,
 };

-static struct fb_ops psbfb_roll_ops = {
+static const struct fb_ops psbfb_roll_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_setcolreg = psbfb_setcolreg,

@@ -186,7 +186,7 @@ static struct fb_ops psbfb_roll_ops = {
 	.fb_mmap = psbfb_mmap,
 };

-static struct fb_ops psbfb_unaccel_ops = {
+static const struct fb_ops psbfb_unaccel_ops = {
 	.owner = THIS_MODULE,
 	DRM_FB_HELPER_DEFAULT_OPS,
 	.fb_setcolreg = psbfb_setcolreg,


@@ -853,11 +853,11 @@ static void i810_dma_quiescent(struct drm_device *dev)
 	i810_wait_ring(dev, dev_priv->ring.Size - 8);
 }

-static int i810_flush_queue(struct drm_device *dev)
+static void i810_flush_queue(struct drm_device *dev)
 {
 	drm_i810_private_t *dev_priv = dev->dev_private;
 	struct drm_device_dma *dma = dev->dma;
-	int i, ret = 0;
+	int i;
 	RING_LOCALS;

 	i810_kernel_lost_context(dev);

@@ -882,7 +882,7 @@ static int i810_flush_queue(struct drm_device *dev)
 			DRM_DEBUG("still on client\n");
 	}

-	return ret;
+	return;
 }

 /* Must be called with the lock held */


@@ -13484,12 +13484,6 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
 		    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
 		    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

-	/*
-	 * Make drm_calc_timestamping_constants in
-	 * drm_atomic_helper_update_legacy_modeset_state() happy
-	 */
-	pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
-
 	return 0;
 }


@@ -1617,7 +1617,7 @@ int i915_gem_huge_page_mock_selftests(void)
 out_put:
 	i915_vm_put(&ppgtt->vm);
 out_unlock:
-	drm_dev_put(&dev_priv->drm);
+	mock_destroy_device(dev_priv);
 	return err;
 }


@@ -1997,7 +1997,7 @@ int i915_gem_context_mock_selftests(void)
 	err = i915_subtests(tests, i915);

-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -272,7 +272,7 @@ int i915_gem_dmabuf_mock_selftests(void)
 	err = i915_subtests(tests, i915);

-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -85,7 +85,7 @@ int i915_gem_object_mock_selftests(void)
 	err = i915_subtests(tests, i915);

-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -73,6 +73,6 @@ int i915_gem_phys_mock_selftests(void)
 	err = i915_subtests(tests, i915);

-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -158,7 +158,7 @@ static int mock_hwsp_freelist(void *arg)
 	__mock_hwsp_record(&state, na, NULL);
 	kfree(state.history);
 err_put:
-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -536,7 +536,7 @@ int i915_gem_evict_mock_selftests(void)
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		err = i915_subtests(tests, &i915->gt);

-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -1727,7 +1727,7 @@ int i915_gem_gtt_mock_selftests(void)
 	mock_fini_ggtt(ggtt);
 	kfree(ggtt);
 out_put:
-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -527,7 +527,7 @@ int i915_request_mock_selftests(void)
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 		err = i915_subtests(tests, i915);

-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -841,7 +841,7 @@ int i915_vma_mock_selftests(void)
 	mock_fini_ggtt(ggtt);
 	kfree(ggtt);
 out_put:
-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@@ -791,7 +791,7 @@ int intel_memory_region_mock_selftests(void)
 	intel_memory_region_put(mem);
 out_unref:
-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);
 	return err;
 }


@ -79,8 +79,6 @@ static void mock_device_release(struct drm_device *dev)
out: out:
i915_params_free(&i915->params); i915_params_free(&i915->params);
put_device(&i915->drm.pdev->dev);
i915->drm.pdev = NULL;
} }
static struct drm_driver mock_driver = { static struct drm_driver mock_driver = {
@ -123,17 +121,10 @@ struct drm_i915_private *mock_gem_device(void)
#endif #endif
struct drm_i915_private *i915; struct drm_i915_private *i915;
struct pci_dev *pdev; struct pci_dev *pdev;
int err;
pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev) if (!pdev)
return NULL; return NULL;
i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
if (!i915) {
kfree(pdev);
return NULL;
}
device_initialize(&pdev->dev); device_initialize(&pdev->dev);
pdev->class = PCI_BASE_CLASS_DISPLAY << 16; pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
pdev->dev.release = release_dev; pdev->dev.release = release_dev;
@ -144,8 +135,23 @@ struct drm_i915_private *mock_gem_device(void)
	/* HACK to disable iommu for the fake device; force identity mapping */
	pdev->dev.iommu = &fake_iommu;
#endif

+	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+		put_device(&pdev->dev);
+		return NULL;
+	}
+
+	i915 = devm_drm_dev_alloc(&pdev->dev, &mock_driver,
+				  struct drm_i915_private, drm);
+	if (IS_ERR(i915)) {
+		pr_err("Failed to allocate mock GEM device: err=%ld\n", PTR_ERR(i915));
+		devres_release_group(&pdev->dev, NULL);
+		put_device(&pdev->dev);
+		return NULL;
+	}

	pci_set_drvdata(pdev, i915);
+	i915->drm.pdev = pdev;

	dev_pm_domain_set(&pdev->dev, &pm_domain);
	pm_runtime_enable(&pdev->dev);
@ -153,16 +159,6 @@ struct drm_i915_private *mock_gem_device(void)
	if (pm_runtime_enabled(&pdev->dev))
		WARN_ON(pm_runtime_get_sync(&pdev->dev));

-	err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
-	if (err) {
-		pr_err("Failed to initialise mock GEM device: err=%d\n", err);
-		put_device(&pdev->dev);
-		kfree(i915);
-		return NULL;
-	}
-	i915->drm.pdev = pdev;
-	drmm_add_final_kfree(&i915->drm, i915);

	i915_params_copy(&i915->params, &i915_modparams);
@ -222,7 +218,15 @@ struct drm_i915_private *mock_gem_device(void)
	intel_gt_driver_late_release(&i915->gt);
	intel_memory_regions_driver_release(i915);
	drm_mode_config_cleanup(&i915->drm);
-	drm_dev_put(&i915->drm);
+	mock_destroy_device(i915);

	return NULL;
}
+
+void mock_destroy_device(struct drm_i915_private *i915)
+{
+	struct device *dev = i915->drm.dev;
+
+	devres_release_group(dev, NULL);
+	put_device(dev);
+}
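/*
 * A minimal sketch of the devres lifecycle this conversion relies on
 * (simplified, for illustration only): everything hanging off the fake
 * struct device, including the drm_device itself, is torn down by a single
 * devres_release_group().
 *
 *	devres_open_group(dev, NULL, GFP_KERNEL);	// open an anonymous group
 *	priv = devm_drm_dev_alloc(dev, &drv,		// drm_device managed by devres
 *				  struct priv_type, drm);
 *	...use the device...
 *	devres_release_group(dev, NULL);		// runs all devres actions
 *	put_device(dev);				// drops the last reference
 *
 * This replaces the old drm_dev_init()/drm_dev_put() pairing and also drops
 * the explicit drmm_add_final_kfree(), since devm_drm_dev_alloc() takes care
 * of the final kfree itself.
 */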


@ -7,4 +7,6 @@ struct drm_i915_private;
struct drm_i915_private *mock_gem_device(void);
void mock_device_flush(struct drm_i915_private *i915);
+void mock_destroy_device(struct drm_i915_private *i915);

#endif /* !__MOCK_GEM_DEVICE_H__ */


@ -39,3 +39,5 @@ config DRM_IMX_HDMI
	depends on DRM_IMX
	help
	  Choose this if you want to use HDMI on i.MX6.
+
+source "drivers/gpu/drm/imx/dcss/Kconfig"


@ -9,3 +9,4 @@ obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
+obj-$(CONFIG_DRM_IMX_DCSS) += dcss/


@ -0,0 +1,9 @@
config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_CMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
help
Choose this if you have an NXP i.MX8MQ based system and want to use the
Display Controller Subsystem. This option enables DCSS support.
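# An illustrative way to enable this (a defconfig fragment, assuming the
# usual i.MX8MQ platform options are already set):
#
#   CONFIG_DRM=y
#   CONFIG_DRM_IMX_DCSS=m
#
# IMX_IRQSTEER and the KMS/CMA helper libraries are pulled in via 'select'.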


@ -0,0 +1,6 @@
imx-dcss-objs := dcss-drv.o dcss-dev.o dcss-blkctl.o dcss-ctxld.o dcss-dtg.o \
dcss-ss.o dcss-dpr.o dcss-scaler.o dcss-kms.o dcss-crtc.o \
dcss-plane.o
obj-$(CONFIG_DRM_IMX_DCSS) += imx-dcss.o


@ -0,0 +1,70 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "dcss-dev.h"
#define DCSS_BLKCTL_RESET_CTRL 0x00
#define B_CLK_RESETN BIT(0)
#define APB_CLK_RESETN BIT(1)
#define P_CLK_RESETN BIT(2)
#define RTR_CLK_RESETN BIT(4)
#define DCSS_BLKCTL_CONTROL0 0x10
#define HDMI_MIPI_CLK_SEL BIT(0)
#define DISPMIX_REFCLK_SEL_POS 4
#define DISPMIX_REFCLK_SEL_MASK GENMASK(5, 4)
#define DISPMIX_PIXCLK_SEL BIT(8)
#define HDMI_SRC_SECURE_EN BIT(16)
struct dcss_blkctl {
struct dcss_dev *dcss;
void __iomem *base_reg;
};
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl)
{
if (blkctl->dcss->hdmi_output)
dcss_writel(0, blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
else
dcss_writel(DISPMIX_PIXCLK_SEL,
blkctl->base_reg + DCSS_BLKCTL_CONTROL0);
dcss_set(B_CLK_RESETN | APB_CLK_RESETN | P_CLK_RESETN | RTR_CLK_RESETN,
blkctl->base_reg + DCSS_BLKCTL_RESET_CTRL);
}
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base)
{
struct dcss_blkctl *blkctl;
blkctl = kzalloc(sizeof(*blkctl), GFP_KERNEL);
if (!blkctl)
return -ENOMEM;
blkctl->base_reg = ioremap(blkctl_base, SZ_4K);
if (!blkctl->base_reg) {
dev_err(dcss->dev, "unable to remap BLK CTRL base\n");
kfree(blkctl);
return -ENOMEM;
}
dcss->blkctl = blkctl;
blkctl->dcss = dcss;
dcss_blkctl_cfg(blkctl);
return 0;
}
void dcss_blkctl_exit(struct dcss_blkctl *blkctl)
{
if (blkctl->base_reg)
iounmap(blkctl->base_reg);
kfree(blkctl);
}


@ -0,0 +1,219 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
static int dcss_enable_vblank(struct drm_crtc *crtc)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = crtc->dev->dev_private;
dcss_dtg_vblank_irq_enable(dcss->dtg, true);
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
enable_irq(dcss_crtc->irq);
return 0;
}
static void dcss_disable_vblank(struct drm_crtc *crtc)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
disable_irq_nosync(dcss_crtc->irq);
dcss_dtg_vblank_irq_enable(dcss->dtg, false);
if (dcss_crtc->disable_ctxld_kick_irq)
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, false);
}
static const struct drm_crtc_funcs dcss_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = dcss_enable_vblank,
.disable_vblank = dcss_disable_vblank,
};
static void dcss_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
drm_crtc_vblank_on(crtc);
}
static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc));
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
if (dcss_dtg_is_enabled(dcss->dtg))
dcss_ctxld_enable(dcss->ctxld);
}
static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode;
struct videomode vm;
drm_display_mode_to_videomode(mode, &vm);
pm_runtime_get_sync(dcss->dev);
vm.pixelclock = mode->crtc_clock * 1000;
dcss_ss_subsam_set(dcss->ss);
dcss_dtg_css_set(dcss->dtg);
if (!drm_mode_equal(mode, old_mode) || !old_crtc_state->active) {
dcss_dtg_sync_set(dcss->dtg, &vm);
dcss_ss_sync_set(dcss->ss, &vm,
mode->flags & DRM_MODE_FLAG_PHSYNC,
mode->flags & DRM_MODE_FLAG_PVSYNC);
}
dcss_enable_dtg_and_ss(dcss);
dcss_ctxld_enable(dcss->ctxld);
/* Allow CTXLD kick interrupt to be disabled when VBLANK is disabled. */
dcss_crtc->disable_ctxld_kick_irq = true;
}
static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct drm_display_mode *old_mode = &old_crtc_state->adjusted_mode;
drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
}
spin_unlock_irq(&crtc->dev->event_lock);
dcss_dtg_ctxld_kick_irq_enable(dcss->dtg, true);
reinit_completion(&dcss->disable_completion);
dcss_disable_dtg_and_ss(dcss);
dcss_ctxld_enable(dcss->ctxld);
if (!drm_mode_equal(mode, old_mode) || !crtc->state->active)
if (!wait_for_completion_timeout(&dcss->disable_completion,
msecs_to_jiffies(100)))
dev_err(dcss->dev, "Shutting off DTG timed out.\n");
/*
* Do not shut off CTXLD kick interrupt when shutting VBLANK off. It
* will be needed to commit the last changes, before going to suspend.
*/
dcss_crtc->disable_ctxld_kick_irq = false;
drm_crtc_vblank_off(crtc);
pm_runtime_mark_last_busy(dcss->dev);
pm_runtime_put_autosuspend(dcss->dev);
}
static const struct drm_crtc_helper_funcs dcss_helper_funcs = {
.atomic_begin = dcss_crtc_atomic_begin,
.atomic_flush = dcss_crtc_atomic_flush,
.atomic_enable = dcss_crtc_atomic_enable,
.atomic_disable = dcss_crtc_atomic_disable,
};
static irqreturn_t dcss_crtc_irq_handler(int irq, void *dev_id)
{
struct dcss_crtc *dcss_crtc = dev_id;
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
if (!dcss_dtg_vblank_irq_valid(dcss->dtg))
return IRQ_NONE;
if (dcss_ctxld_is_flushed(dcss->ctxld))
drm_crtc_handle_vblank(&dcss_crtc->base);
dcss_dtg_vblank_irq_clear(dcss->dtg);
return IRQ_HANDLED;
}
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm)
{
struct dcss_dev *dcss = drm->dev_private;
struct platform_device *pdev = to_platform_device(dcss->dev);
int ret;
crtc->plane[0] = dcss_plane_init(drm, drm_crtc_mask(&crtc->base),
DRM_PLANE_TYPE_PRIMARY, 0);
if (IS_ERR(crtc->plane[0]))
return PTR_ERR(crtc->plane[0]);
crtc->base.port = dcss->of_port;
drm_crtc_helper_add(&crtc->base, &dcss_helper_funcs);
ret = drm_crtc_init_with_planes(drm, &crtc->base, &crtc->plane[0]->base,
NULL, &dcss_crtc_funcs, NULL);
if (ret) {
dev_err(dcss->dev, "failed to init crtc\n");
return ret;
}
crtc->irq = platform_get_irq_byname(pdev, "vblank");
if (crtc->irq < 0)
return crtc->irq;
ret = request_irq(crtc->irq, dcss_crtc_irq_handler,
0, "dcss_drm", crtc);
if (ret) {
dev_err(dcss->dev, "irq request failed with %d.\n", ret);
return ret;
}
disable_irq(crtc->irq);
return 0;
}
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm)
{
free_irq(crtc->irq, crtc);
}


@ -0,0 +1,424 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dcss-dev.h"
#define DCSS_CTXLD_CONTROL_STATUS 0x0
#define CTXLD_ENABLE BIT(0)
#define ARB_SEL BIT(1)
#define RD_ERR_EN BIT(2)
#define DB_COMP_EN BIT(3)
#define SB_HP_COMP_EN BIT(4)
#define SB_LP_COMP_EN BIT(5)
#define DB_PEND_SB_REC_EN BIT(6)
#define SB_PEND_DISP_ACTIVE_EN BIT(7)
#define AHB_ERR_EN BIT(8)
#define RD_ERR BIT(16)
#define DB_COMP BIT(17)
#define SB_HP_COMP BIT(18)
#define SB_LP_COMP BIT(19)
#define DB_PEND_SB_REC BIT(20)
#define SB_PEND_DISP_ACTIVE BIT(21)
#define AHB_ERR BIT(22)
#define DCSS_CTXLD_DB_BASE_ADDR 0x10
#define DCSS_CTXLD_DB_COUNT 0x14
#define DCSS_CTXLD_SB_BASE_ADDR 0x18
#define DCSS_CTXLD_SB_COUNT 0x1C
#define SB_HP_COUNT_POS 0
#define SB_HP_COUNT_MASK 0xffff
#define SB_LP_COUNT_POS 16
#define SB_LP_COUNT_MASK 0xffff0000
#define DCSS_AHB_ERR_ADDR 0x20
#define CTXLD_IRQ_COMPLETION (DB_COMP | SB_HP_COMP | SB_LP_COMP)
#define CTXLD_IRQ_ERROR (RD_ERR | DB_PEND_SB_REC | AHB_ERR)
/* The following sizes are in context loader entries, 8 bytes each. */
#define CTXLD_DB_CTX_ENTRIES 1024 /* max 65536 */
#define CTXLD_SB_LP_CTX_ENTRIES 10240 /* max 65536 */
#define CTXLD_SB_HP_CTX_ENTRIES 20000 /* max 65536 */
#define CTXLD_SB_CTX_ENTRIES (CTXLD_SB_LP_CTX_ENTRIES + \
CTXLD_SB_HP_CTX_ENTRIES)
/* Sizes, in entries, of the DB, SB_HP and SB_LP context regions. */
static u16 dcss_ctxld_ctx_size[3] = {
CTXLD_DB_CTX_ENTRIES,
CTXLD_SB_HP_CTX_ENTRIES,
CTXLD_SB_LP_CTX_ENTRIES
};
/* this represents an entry in the context loader map */
struct dcss_ctxld_item {
u32 val;
u32 ofs;
};
#define CTX_ITEM_SIZE sizeof(struct dcss_ctxld_item)
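/*
 * Worked size example, derived from the constants above: each item is two
 * u32s, i.e. 8 bytes, so one DB context is 1024 * 8 = 8 KiB and one SB
 * context (HP and LP back to back) is (20000 + 10240) * 8 = 241920 bytes,
 * roughly 236 KiB.  Each is allocated twice for double buffering.
 */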
struct dcss_ctxld {
struct device *dev;
void __iomem *ctxld_reg;
int irq;
bool irq_en;
struct dcss_ctxld_item *db[2];
struct dcss_ctxld_item *sb_hp[2];
struct dcss_ctxld_item *sb_lp[2];
dma_addr_t db_paddr[2];
dma_addr_t sb_paddr[2];
u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
u8 current_ctx;
bool in_use;
bool armed;
spinlock_t lock; /* protects concurrent access to private data */
};
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
struct dcss_ctxld *ctxld = data;
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
u32 irq_status;
irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
if (irq_status & CTXLD_IRQ_COMPLETION &&
!(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
ctxld->in_use = false;
if (dcss && dcss->disable_callback)
dcss->disable_callback(dcss);
} else if (irq_status & CTXLD_IRQ_ERROR) {
/*
* Except for throwing an error message and clearing the status
* register, there's not much we can do here.
*/
dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
irq_status);
dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
}
dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
return IRQ_HANDLED;
}
static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
struct platform_device *pdev)
{
int ret;
ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
if (ctxld->irq < 0)
return ctxld->irq;
ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
0, "dcss_ctxld", ctxld);
if (ret) {
dev_err(ctxld->dev, "ctxld: irq request failed.\n");
return ret;
}
ctxld->irq_en = true;
return 0;
}
static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
{
dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
}
static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
{
struct dcss_ctxld_item *ctx;
int i;
for (i = 0; i < 2; i++) {
if (ctxld->db[i]) {
dma_free_coherent(ctxld->dev,
CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
ctxld->db[i], ctxld->db_paddr[i]);
ctxld->db[i] = NULL;
ctxld->db_paddr[i] = 0;
}
if (ctxld->sb_hp[i]) {
dma_free_coherent(ctxld->dev,
CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
ctxld->sb_hp[i], ctxld->sb_paddr[i]);
ctxld->sb_hp[i] = NULL;
ctxld->sb_paddr[i] = 0;
}
}
}
static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
{
struct dcss_ctxld_item *ctx;
int i;
for (i = 0; i < 2; i++) {
ctx = dma_alloc_coherent(ctxld->dev,
CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
&ctxld->db_paddr[i], GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctxld->db[i] = ctx;
ctx = dma_alloc_coherent(ctxld->dev,
CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
&ctxld->sb_paddr[i], GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctxld->sb_hp[i] = ctx;
ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
}
return 0;
}
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
{
struct dcss_ctxld *ctxld;
int ret;
ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
if (!ctxld)
return -ENOMEM;
dcss->ctxld = ctxld;
ctxld->dev = dcss->dev;
spin_lock_init(&ctxld->lock);
ret = dcss_ctxld_alloc_ctx(ctxld);
if (ret) {
dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
goto err;
}
ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
if (!ctxld->ctxld_reg) {
dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
ret = -ENOMEM;
goto err;
}
ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
if (ret)
goto err_irq;
dcss_ctxld_hw_cfg(ctxld);
return 0;
err_irq:
iounmap(ctxld->ctxld_reg);
err:
dcss_ctxld_free_ctx(ctxld);
kfree(ctxld);
return ret;
}
void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
free_irq(ctxld->irq, ctxld);
if (ctxld->ctxld_reg)
iounmap(ctxld->ctxld_reg);
dcss_ctxld_free_ctx(ctxld);
kfree(ctxld);
}
static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
{
int curr_ctx = ctxld->current_ctx;
u32 db_base, sb_base, sb_count;
u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
if (!dcss)
return 0;
dcss_dpr_write_sysctrl(dcss->dpr);
dcss_scaler_write_sclctrl(dcss->scaler);
sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];
/* make sure SB_LP context area comes after SB_HP */
if (sb_lp_cnt &&
ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
struct dcss_ctxld_item *sb_lp_adjusted;
sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;
memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
sb_lp_cnt * CTX_ITEM_SIZE);
}
db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;
dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);
if (sb_hp_cnt)
sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
else
sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;
sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;
dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);
/* enable the context loader */
dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
ctxld->in_use = true;
/*
* Toggle the current context to the alternate one so that any updates
* in the modules' settings take place there.
*/
ctxld->current_ctx ^= 1;
ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;
return 0;
}
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
spin_lock_irq(&ctxld->lock);
ctxld->armed = true;
spin_unlock_irq(&ctxld->lock);
return 0;
}
void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
{
unsigned long flags;
spin_lock_irqsave(&ctxld->lock, flags);
if (ctxld->armed && !ctxld->in_use) {
ctxld->armed = false;
dcss_ctxld_enable_locked(ctxld);
}
spin_unlock_irqrestore(&ctxld->lock, flags);
}
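/*
 * A sketch of the intended flow, pieced together from the code above
 * (illustrative only):
 *
 *	dcss_ctxld_write(ctxld, CTX_SB_HP, val, ofs);	// queue into current ctx
 *	dcss_ctxld_enable(ctxld);			// arm a flush
 *	...						// DTG LINE0 irq fires
 *	dcss_ctxld_kick(ctxld);				// programs HW, flips current_ctx
 *
 * While the hardware drains one context, new register writes accumulate in
 * the other; dcss_ctxld_is_flushed() reports whether the software-side
 * context is empty.
 */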
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
u32 reg_ofs)
{
int curr_ctx = ctxld->current_ctx;
struct dcss_ctxld_item *ctx[] = {
[CTX_DB] = ctxld->db[curr_ctx],
[CTX_SB_HP] = ctxld->sb_hp[curr_ctx],
[CTX_SB_LP] = ctxld->sb_lp[curr_ctx]
};
int item_idx = ctxld->ctx_size[curr_ctx][ctx_id];
if (item_idx + 1 > dcss_ctxld_ctx_size[ctx_id]) {
WARN_ON(1);
return;
}
ctx[ctx_id][item_idx].val = val;
ctx[ctx_id][item_idx].ofs = reg_ofs;
ctxld->ctx_size[curr_ctx][ctx_id] += 1;
}
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
u32 val, u32 reg_ofs)
{
spin_lock_irq(&ctxld->lock);
dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
spin_unlock_irq(&ctxld->lock);
}
bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
{
return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
}
int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
{
dcss_ctxld_hw_cfg(ctxld);
if (!ctxld->irq_en) {
enable_irq(ctxld->irq);
ctxld->irq_en = true;
}
return 0;
}
int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
{
int ret = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(500);
if (!dcss_ctxld_is_flushed(ctxld)) {
dcss_ctxld_kick(ctxld);
while (!time_after(jiffies, timeout) && ctxld->in_use)
msleep(20);
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
}
spin_lock_irq(&ctxld->lock);
if (ctxld->irq_en) {
disable_irq_nosync(ctxld->irq);
ctxld->irq_en = false;
}
/* reset context region and sizes */
ctxld->current_ctx = 0;
ctxld->ctx_size[0][CTX_DB] = 0;
ctxld->ctx_size[0][CTX_SB_HP] = 0;
ctxld->ctx_size[0][CTX_SB_LP] = 0;
spin_unlock_irq(&ctxld->lock);
return ret;
}
void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
{
lockdep_assert_held(&ctxld->lock);
}


@ -0,0 +1,325 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/clk.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_modeset_helper.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
static void dcss_clocks_enable(struct dcss_dev *dcss)
{
clk_prepare_enable(dcss->axi_clk);
clk_prepare_enable(dcss->apb_clk);
clk_prepare_enable(dcss->rtrm_clk);
clk_prepare_enable(dcss->dtrc_clk);
clk_prepare_enable(dcss->pix_clk);
}
static void dcss_clocks_disable(struct dcss_dev *dcss)
{
clk_disable_unprepare(dcss->pix_clk);
clk_disable_unprepare(dcss->dtrc_clk);
clk_disable_unprepare(dcss->rtrm_clk);
clk_disable_unprepare(dcss->apb_clk);
clk_disable_unprepare(dcss->axi_clk);
}
static void dcss_disable_dtg_and_ss_cb(void *data)
{
struct dcss_dev *dcss = data;
dcss->disable_callback = NULL;
dcss_ss_shutoff(dcss->ss);
dcss_dtg_shutoff(dcss->dtg);
complete(&dcss->disable_completion);
}
void dcss_disable_dtg_and_ss(struct dcss_dev *dcss)
{
dcss->disable_callback = dcss_disable_dtg_and_ss_cb;
}
void dcss_enable_dtg_and_ss(struct dcss_dev *dcss)
{
if (dcss->disable_callback)
dcss->disable_callback = NULL;
dcss_dtg_enable(dcss->dtg);
dcss_ss_enable(dcss->ss);
}
static int dcss_submodules_init(struct dcss_dev *dcss)
{
int ret = 0;
u32 base_addr = dcss->start_addr;
const struct dcss_type_data *devtype = dcss->devtype;
dcss_clocks_enable(dcss);
ret = dcss_blkctl_init(dcss, base_addr + devtype->blkctl_ofs);
if (ret)
return ret;
ret = dcss_ctxld_init(dcss, base_addr + devtype->ctxld_ofs);
if (ret)
goto ctxld_err;
ret = dcss_dtg_init(dcss, base_addr + devtype->dtg_ofs);
if (ret)
goto dtg_err;
ret = dcss_ss_init(dcss, base_addr + devtype->ss_ofs);
if (ret)
goto ss_err;
ret = dcss_dpr_init(dcss, base_addr + devtype->dpr_ofs);
if (ret)
goto dpr_err;
ret = dcss_scaler_init(dcss, base_addr + devtype->scaler_ofs);
if (ret)
goto scaler_err;
dcss_clocks_disable(dcss);
return 0;
scaler_err:
dcss_dpr_exit(dcss->dpr);
dpr_err:
dcss_ss_exit(dcss->ss);
ss_err:
dcss_dtg_exit(dcss->dtg);
dtg_err:
dcss_ctxld_exit(dcss->ctxld);
ctxld_err:
dcss_blkctl_exit(dcss->blkctl);
dcss_clocks_disable(dcss);
return ret;
}
static void dcss_submodules_stop(struct dcss_dev *dcss)
{
dcss_clocks_enable(dcss);
dcss_scaler_exit(dcss->scaler);
dcss_dpr_exit(dcss->dpr);
dcss_ss_exit(dcss->ss);
dcss_dtg_exit(dcss->dtg);
dcss_ctxld_exit(dcss->ctxld);
dcss_blkctl_exit(dcss->blkctl);
dcss_clocks_disable(dcss);
}
static int dcss_clks_init(struct dcss_dev *dcss)
{
int i;
struct {
const char *id;
struct clk **clk;
} clks[] = {
{"apb", &dcss->apb_clk},
{"axi", &dcss->axi_clk},
{"pix", &dcss->pix_clk},
{"rtrm", &dcss->rtrm_clk},
{"dtrc", &dcss->dtrc_clk},
};
for (i = 0; i < ARRAY_SIZE(clks); i++) {
*clks[i].clk = devm_clk_get(dcss->dev, clks[i].id);
if (IS_ERR(*clks[i].clk)) {
dev_err(dcss->dev, "failed to get %s clock\n",
clks[i].id);
return PTR_ERR(*clks[i].clk);
}
}
return 0;
}
static void dcss_clks_release(struct dcss_dev *dcss)
{
devm_clk_put(dcss->dev, dcss->dtrc_clk);
devm_clk_put(dcss->dev, dcss->rtrm_clk);
devm_clk_put(dcss->dev, dcss->pix_clk);
devm_clk_put(dcss->dev, dcss->axi_clk);
devm_clk_put(dcss->dev, dcss->apb_clk);
}
struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
{
struct platform_device *pdev = to_platform_device(dev);
int ret;
struct resource *res;
struct dcss_dev *dcss;
const struct dcss_type_data *devtype;
devtype = of_device_get_match_data(dev);
if (!devtype) {
dev_err(dev, "no device match found\n");
return ERR_PTR(-ENODEV);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "cannot get memory resource\n");
return ERR_PTR(-EINVAL);
}
dcss = kzalloc(sizeof(*dcss), GFP_KERNEL);
if (!dcss)
return ERR_PTR(-ENOMEM);
dcss->dev = dev;
dcss->devtype = devtype;
dcss->hdmi_output = hdmi_output;
ret = dcss_clks_init(dcss);
if (ret) {
dev_err(dev, "clocks initialization failed\n");
goto err;
}
dcss->of_port = of_graph_get_port_by_id(dev->of_node, 0);
if (!dcss->of_port) {
dev_err(dev, "no port@0 node in %s\n", dev->of_node->full_name);
ret = -ENODEV;
goto clks_err;
}
dcss->start_addr = res->start;
ret = dcss_submodules_init(dcss);
if (ret) {
dev_err(dev, "submodules initialization failed\n");
goto clks_err;
}
init_completion(&dcss->disable_completion);
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
pm_runtime_set_suspended(dev);
pm_runtime_allow(dev);
pm_runtime_enable(dev);
return dcss;
clks_err:
dcss_clks_release(dcss);
err:
kfree(dcss);
return ERR_PTR(ret);
}
void dcss_dev_destroy(struct dcss_dev *dcss)
{
if (!pm_runtime_suspended(dcss->dev)) {
dcss_ctxld_suspend(dcss->ctxld);
dcss_clocks_disable(dcss);
}
pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss);
dcss_clks_release(dcss);
kfree(dcss);
}
#ifdef CONFIG_PM_SLEEP
int dcss_dev_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base);
int ret;
drm_bridge_connector_disable_hpd(kms->connector);
drm_mode_config_helper_suspend(ddev);
if (pm_runtime_suspended(dev))
return 0;
ret = dcss_ctxld_suspend(dcss->ctxld);
if (ret)
return ret;
dcss_clocks_disable(dcss);
return 0;
}
int dcss_dev_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base);
if (pm_runtime_suspended(dev)) {
drm_mode_config_helper_resume(ddev);
return 0;
}
dcss_clocks_enable(dcss);
dcss_blkctl_cfg(dcss->blkctl);
dcss_ctxld_resume(dcss->ctxld);
drm_mode_config_helper_resume(ddev);
drm_bridge_connector_enable_hpd(kms->connector);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
int dcss_dev_runtime_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
int ret;
ret = dcss_ctxld_suspend(dcss->ctxld);
if (ret)
return ret;
dcss_clocks_disable(dcss);
return 0;
}
int dcss_dev_runtime_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
dcss_clocks_enable(dcss);
dcss_blkctl_cfg(dcss->blkctl);
dcss_ctxld_resume(dcss->ctxld);
return 0;
}
#endif /* CONFIG_PM */


@ -0,0 +1,177 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 NXP.
*/
#ifndef __DCSS_PRV_H__
#define __DCSS_PRV_H__
#include <drm/drm_fourcc.h>
#include <linux/io.h>
#include <video/videomode.h>
#define SET 0x04
#define CLR 0x08
#define TGL 0x0C
#define dcss_writel(v, c) writel((v), (c))
#define dcss_readl(c) readl(c)
#define dcss_set(v, c) writel((v), (c) + SET)
#define dcss_clr(v, c) writel((v), (c) + CLR)
#define dcss_toggle(v, c) writel((v), (c) + TGL)
static inline void dcss_update(u32 v, u32 m, void __iomem *c)
{
writel((readl(c) & ~(m)) | (v), (c));
}
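/*
 * The SET/CLR/TGL offsets assume the common i.MX register aliasing scheme:
 * a write to reg + 0x04 sets the written bits, reg + 0x08 clears them and
 * reg + 0x0C toggles them, all without a read-modify-write.  For example
 * (illustrative only):
 *
 *	dcss_set(CTXLD_ENABLE, base + DCSS_CTXLD_CONTROL_STATUS);
 *	dcss_clr(CTXLD_ENABLE, base + DCSS_CTXLD_CONTROL_STATUS);
 *
 * dcss_update() is the read-modify-write fallback for multi-bit fields that
 * have no such aliases.
 */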
#define DCSS_DBG_REG(reg) {.name = #reg, .ofs = reg}
enum {
DCSS_IMX8MQ = 0,
};
struct dcss_type_data {
const char *name;
u32 blkctl_ofs;
u32 ctxld_ofs;
u32 rdsrc_ofs;
u32 wrscl_ofs;
u32 dtg_ofs;
u32 scaler_ofs;
u32 ss_ofs;
u32 dpr_ofs;
u32 dtrc_ofs;
u32 dec400d_ofs;
u32 hdr10_ofs;
};
struct dcss_debug_reg {
char *name;
u32 ofs;
};
enum dcss_ctxld_ctx_type {
CTX_DB,
CTX_SB_HP, /* high-priority */
CTX_SB_LP, /* low-priority */
};
struct dcss_dev {
struct device *dev;
const struct dcss_type_data *devtype;
struct device_node *of_port;
u32 start_addr;
struct dcss_blkctl *blkctl;
struct dcss_ctxld *ctxld;
struct dcss_dpr *dpr;
struct dcss_dtg *dtg;
struct dcss_ss *ss;
struct dcss_hdr10 *hdr10;
struct dcss_scaler *scaler;
struct dcss_dtrc *dtrc;
struct dcss_dec400d *dec400d;
struct dcss_wrscl *wrscl;
struct dcss_rdsrc *rdsrc;
struct clk *apb_clk;
struct clk *axi_clk;
struct clk *pix_clk;
struct clk *rtrm_clk;
struct clk *dtrc_clk;
struct clk *pll_src_clk;
struct clk *pll_phy_ref_clk;
bool hdmi_output;
void (*disable_callback)(void *data);
struct completion disable_completion;
};
struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev);
struct drm_device *dcss_drv_dev_to_drm(struct device *dev);
struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output);
void dcss_dev_destroy(struct dcss_dev *dcss);
int dcss_dev_runtime_suspend(struct device *dev);
int dcss_dev_runtime_resume(struct device *dev);
int dcss_dev_suspend(struct device *dev);
int dcss_dev_resume(struct device *dev);
void dcss_enable_dtg_and_ss(struct dcss_dev *dcss);
void dcss_disable_dtg_and_ss(struct dcss_dev *dcss);
/* BLKCTL */
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
void dcss_blkctl_exit(struct dcss_blkctl *blkctl);
/* CTXLD */
int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base);
void dcss_ctxld_exit(struct dcss_ctxld *ctxld);
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
u32 val, u32 reg_idx);
int dcss_ctxld_resume(struct dcss_ctxld *dcss_ctxld);
int dcss_ctxld_suspend(struct dcss_ctxld *dcss_ctxld);
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctlxd, u32 ctx_id, u32 val,
u32 reg_ofs);
void dcss_ctxld_kick(struct dcss_ctxld *ctxld);
bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld);
int dcss_ctxld_enable(struct dcss_ctxld *ctxld);
void dcss_ctxld_register_completion(struct dcss_ctxld *ctxld,
struct completion *dis_completion);
void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld);
/* DPR */
int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base);
void dcss_dpr_exit(struct dcss_dpr *dpr);
void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr);
void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres);
void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
u32 chroma_base_addr, u16 pitch);
void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en);
void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
const struct drm_format_info *format, u64 modifier);
void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation);
/* DTG */
int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base);
void dcss_dtg_exit(struct dcss_dtg *dtg);
bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg);
void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en);
void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg);
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm);
void dcss_dtg_css_set(struct dcss_dtg *dtg);
void dcss_dtg_enable(struct dcss_dtg *dtg);
void dcss_dtg_shutoff(struct dcss_dtg *dtg);
bool dcss_dtg_is_enabled(struct dcss_dtg *dtg);
void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en);
bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha);
void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
const struct drm_format_info *format, int alpha);
void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
int px, int py, int pw, int ph);
void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en);
/* SUBSAM */
int dcss_ss_init(struct dcss_dev *dcss, unsigned long subsam_base);
void dcss_ss_exit(struct dcss_ss *ss);
void dcss_ss_enable(struct dcss_ss *ss);
void dcss_ss_shutoff(struct dcss_ss *ss);
void dcss_ss_subsam_set(struct dcss_ss *ss);
void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
bool phsync, bool pvsync);
/* SCALER */
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base);
void dcss_scaler_exit(struct dcss_scaler *scl);
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
u32 vrefresh_hz);
void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en);
int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
int *min, int *max);
void dcss_scaler_write_sclctrl(struct dcss_scaler *scl);
#endif /* __DCSS_PRV_H__ */


@ -0,0 +1,562 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/slab.h>
#include "dcss-dev.h"
#define DCSS_DPR_SYSTEM_CTRL0 0x000
#define RUN_EN BIT(0)
#define SOFT_RESET BIT(1)
#define REPEAT_EN BIT(2)
#define SHADOW_LOAD_EN BIT(3)
#define SW_SHADOW_LOAD_SEL BIT(4)
#define BCMD2AXI_MSTR_ID_CTRL BIT(16)
#define DCSS_DPR_IRQ_MASK 0x020
#define DCSS_DPR_IRQ_MASK_STATUS 0x030
#define DCSS_DPR_IRQ_NONMASK_STATUS 0x040
#define IRQ_DPR_CTRL_DONE BIT(0)
#define IRQ_DPR_RUN BIT(1)
#define IRQ_DPR_SHADOW_LOADED BIT(2)
#define IRQ_AXI_READ_ERR BIT(3)
#define DPR2RTR_YRGB_FIFO_OVFL BIT(4)
#define DPR2RTR_UV_FIFO_OVFL BIT(5)
#define DPR2RTR_FIFO_LD_BUF_RDY_YRGB_ERR BIT(6)
#define DPR2RTR_FIFO_LD_BUF_RDY_UV_ERR BIT(7)
#define DCSS_DPR_MODE_CTRL0 0x050
#define RTR_3BUF_EN BIT(0)
#define RTR_4LINE_BUF_EN BIT(1)
#define TILE_TYPE_POS 2
#define TILE_TYPE_MASK GENMASK(4, 2)
#define YUV_EN BIT(6)
#define COMP_2PLANE_EN BIT(7)
#define PIX_SIZE_POS 8
#define PIX_SIZE_MASK GENMASK(9, 8)
#define PIX_LUMA_UV_SWAP BIT(10)
#define PIX_UV_SWAP BIT(11)
#define B_COMP_SEL_POS 12
#define B_COMP_SEL_MASK GENMASK(13, 12)
#define G_COMP_SEL_POS 14
#define G_COMP_SEL_MASK GENMASK(15, 14)
#define R_COMP_SEL_POS 16
#define R_COMP_SEL_MASK GENMASK(17, 16)
#define A_COMP_SEL_POS 18
#define A_COMP_SEL_MASK GENMASK(19, 18)
#define DCSS_DPR_FRAME_CTRL0 0x070
#define HFLIP_EN BIT(0)
#define VFLIP_EN BIT(1)
#define ROT_ENC_POS 2
#define ROT_ENC_MASK GENMASK(3, 2)
#define ROT_FLIP_ORDER_EN BIT(4)
#define PITCH_POS 16
#define PITCH_MASK GENMASK(31, 16)
#define DCSS_DPR_FRAME_1P_CTRL0 0x090
#define DCSS_DPR_FRAME_1P_PIX_X_CTRL 0x0A0
#define DCSS_DPR_FRAME_1P_PIX_Y_CTRL 0x0B0
#define DCSS_DPR_FRAME_1P_BASE_ADDR 0x0C0
#define DCSS_DPR_FRAME_2P_CTRL0 0x0E0
#define DCSS_DPR_FRAME_2P_PIX_X_CTRL 0x0F0
#define DCSS_DPR_FRAME_2P_PIX_Y_CTRL 0x100
#define DCSS_DPR_FRAME_2P_BASE_ADDR 0x110
#define DCSS_DPR_STATUS_CTRL0 0x130
#define STATUS_MUX_SEL_MASK GENMASK(2, 0)
#define STATUS_SRC_SEL_POS 16
#define STATUS_SRC_SEL_MASK GENMASK(18, 16)
#define DCSS_DPR_STATUS_CTRL1 0x140
#define DCSS_DPR_RTRAM_CTRL0 0x200
#define NUM_ROWS_ACTIVE BIT(0)
#define THRES_HIGH_POS 1
#define THRES_HIGH_MASK GENMASK(3, 1)
#define THRES_LOW_POS 4
#define THRES_LOW_MASK GENMASK(6, 4)
#define ABORT_SEL BIT(7)
enum dcss_tile_type {
TILE_LINEAR = 0,
TILE_GPU_STANDARD,
TILE_GPU_SUPER,
TILE_VPU_YUV420,
TILE_VPU_VP9,
};
enum dcss_pix_size {
PIX_SIZE_8,
PIX_SIZE_16,
PIX_SIZE_32,
};
struct dcss_dpr_ch {
struct dcss_dpr *dpr;
void __iomem *base_reg;
u32 base_ofs;
struct drm_format_info format;
enum dcss_pix_size pix_size;
enum dcss_tile_type tile;
bool rtram_4line_en;
bool rtram_3buf_en;
u32 frame_ctrl;
u32 mode_ctrl;
u32 sys_ctrl;
u32 rtram_ctrl;
bool sys_ctrl_chgd;
int ch_num;
int irq;
};
struct dcss_dpr {
struct device *dev;
struct dcss_ctxld *ctxld;
u32 ctx_id;
struct dcss_dpr_ch ch[3];
};
static void dcss_dpr_write(struct dcss_dpr_ch *ch, u32 val, u32 ofs)
{
struct dcss_dpr *dpr = ch->dpr;
dcss_ctxld_write(dpr->ctxld, dpr->ctx_id, val, ch->base_ofs + ofs);
}
static int dcss_dpr_ch_init_all(struct dcss_dpr *dpr, unsigned long dpr_base)
{
struct dcss_dpr_ch *ch;
int i;
for (i = 0; i < 3; i++) {
ch = &dpr->ch[i];
ch->base_ofs = dpr_base + i * 0x1000;
ch->base_reg = ioremap(ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(dpr->dev, "dpr: unable to remap ch %d base\n",
i);
return -ENOMEM;
}
ch->dpr = dpr;
ch->ch_num = i;
dcss_writel(0xff, ch->base_reg + DCSS_DPR_IRQ_MASK);
}
return 0;
}
int dcss_dpr_init(struct dcss_dev *dcss, unsigned long dpr_base)
{
struct dcss_dpr *dpr;
dpr = kzalloc(sizeof(*dpr), GFP_KERNEL);
if (!dpr)
return -ENOMEM;
dcss->dpr = dpr;
dpr->dev = dcss->dev;
dpr->ctxld = dcss->ctxld;
dpr->ctx_id = CTX_SB_HP;
if (dcss_dpr_ch_init_all(dpr, dpr_base)) {
int i;
for (i = 0; i < 3; i++) {
if (dpr->ch[i].base_reg)
iounmap(dpr->ch[i].base_reg);
}
kfree(dpr);
return -ENOMEM;
}
return 0;
}
void dcss_dpr_exit(struct dcss_dpr *dpr)
{
int ch_no;
/* stop DPR on all channels */
for (ch_no = 0; ch_no < 3; ch_no++) {
struct dcss_dpr_ch *ch = &dpr->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_DPR_SYSTEM_CTRL0);
if (ch->base_reg)
iounmap(ch->base_reg);
}
kfree(dpr);
}
static u32 dcss_dpr_x_pix_wide_adjust(struct dcss_dpr_ch *ch, u32 pix_wide,
u32 pix_format)
{
u8 pix_in_64byte_map[3][5] = {
/* LIN, GPU_STD, GPU_SUP, VPU_YUV420, VPU_VP9 */
{ 64, 8, 8, 8, 16}, /* PIX_SIZE_8 */
{ 32, 8, 8, 8, 8}, /* PIX_SIZE_16 */
{ 16, 4, 4, 8, 8}, /* PIX_SIZE_32 */
};
u32 offset;
u32 div_64byte_mod, pix_in_64byte;
pix_in_64byte = pix_in_64byte_map[ch->pix_size][ch->tile];
div_64byte_mod = pix_wide % pix_in_64byte;
offset = (div_64byte_mod == 0) ? 0 : (pix_in_64byte - div_64byte_mod);
return pix_wide + offset;
}
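/*
 * Worked example from the table above: a linear ARGB8888 plane has
 * PIX_SIZE_32/TILE_LINEAR, i.e. 16 pixels per 64-byte burst, so an xres of
 * 1366 is rounded up to 1376 (1366 % 16 == 6, offset == 10).  The same
 * round-up-to-a-multiple logic applies for every pixel size and tiling mode.
 */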
static u32 dcss_dpr_y_pix_high_adjust(struct dcss_dpr_ch *ch, u32 pix_high,
u32 pix_format)
{
u8 num_rows_buf = ch->rtram_4line_en ? 4 : 8;
u32 offset, pix_y_mod;
pix_y_mod = pix_high % num_rows_buf;
offset = pix_y_mod ? (num_rows_buf - pix_y_mod) : 0;
return pix_high + offset;
}
void dcss_dpr_set_res(struct dcss_dpr *dpr, int ch_num, u32 xres, u32 yres)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
u32 pix_format = ch->format.format;
u32 gap = DCSS_DPR_FRAME_2P_BASE_ADDR - DCSS_DPR_FRAME_1P_BASE_ADDR;
int plane, max_planes = 1;
u32 pix_x_wide, pix_y_high;
if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21)
max_planes = 2;
for (plane = 0; plane < max_planes; plane++) {
yres = plane == 1 ? yres >> 1 : yres;
pix_x_wide = dcss_dpr_x_pix_wide_adjust(ch, xres, pix_format);
pix_y_high = dcss_dpr_y_pix_high_adjust(ch, yres, pix_format);
dcss_dpr_write(ch, pix_x_wide,
DCSS_DPR_FRAME_1P_PIX_X_CTRL + plane * gap);
dcss_dpr_write(ch, pix_y_high,
DCSS_DPR_FRAME_1P_PIX_Y_CTRL + plane * gap);
dcss_dpr_write(ch, 2, DCSS_DPR_FRAME_1P_CTRL0 + plane * gap);
}
}
void dcss_dpr_addr_set(struct dcss_dpr *dpr, int ch_num, u32 luma_base_addr,
u32 chroma_base_addr, u16 pitch)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
dcss_dpr_write(ch, luma_base_addr, DCSS_DPR_FRAME_1P_BASE_ADDR);
dcss_dpr_write(ch, chroma_base_addr, DCSS_DPR_FRAME_2P_BASE_ADDR);
ch->frame_ctrl &= ~PITCH_MASK;
ch->frame_ctrl |= (((u32)pitch << PITCH_POS) & PITCH_MASK);
}
static void dcss_dpr_argb_comp_sel(struct dcss_dpr_ch *ch, int a_sel, int r_sel,
int g_sel, int b_sel)
{
u32 sel;
sel = ((a_sel << A_COMP_SEL_POS) & A_COMP_SEL_MASK) |
((r_sel << R_COMP_SEL_POS) & R_COMP_SEL_MASK) |
((g_sel << G_COMP_SEL_POS) & G_COMP_SEL_MASK) |
((b_sel << B_COMP_SEL_POS) & B_COMP_SEL_MASK);
ch->mode_ctrl &= ~(A_COMP_SEL_MASK | R_COMP_SEL_MASK |
G_COMP_SEL_MASK | B_COMP_SEL_MASK);
ch->mode_ctrl |= sel;
}
static void dcss_dpr_pix_size_set(struct dcss_dpr_ch *ch,
const struct drm_format_info *format)
{
u32 val;
switch (format->format) {
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
val = PIX_SIZE_8;
break;
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
val = PIX_SIZE_16;
break;
default:
val = PIX_SIZE_32;
break;
}
ch->pix_size = val;
ch->mode_ctrl &= ~PIX_SIZE_MASK;
ch->mode_ctrl |= ((val << PIX_SIZE_POS) & PIX_SIZE_MASK);
}
static void dcss_dpr_uv_swap(struct dcss_dpr_ch *ch, bool swap)
{
ch->mode_ctrl &= ~PIX_UV_SWAP;
ch->mode_ctrl |= (swap ? PIX_UV_SWAP : 0);
}
static void dcss_dpr_y_uv_swap(struct dcss_dpr_ch *ch, bool swap)
{
ch->mode_ctrl &= ~PIX_LUMA_UV_SWAP;
ch->mode_ctrl |= (swap ? PIX_LUMA_UV_SWAP : 0);
}
static void dcss_dpr_2plane_en(struct dcss_dpr_ch *ch, bool en)
{
ch->mode_ctrl &= ~COMP_2PLANE_EN;
ch->mode_ctrl |= (en ? COMP_2PLANE_EN : 0);
}
static void dcss_dpr_yuv_en(struct dcss_dpr_ch *ch, bool en)
{
ch->mode_ctrl &= ~YUV_EN;
ch->mode_ctrl |= (en ? YUV_EN : 0);
}
void dcss_dpr_enable(struct dcss_dpr *dpr, int ch_num, bool en)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
u32 sys_ctrl;
sys_ctrl = (en ? REPEAT_EN | RUN_EN : 0);
if (en) {
dcss_dpr_write(ch, ch->mode_ctrl, DCSS_DPR_MODE_CTRL0);
dcss_dpr_write(ch, ch->frame_ctrl, DCSS_DPR_FRAME_CTRL0);
dcss_dpr_write(ch, ch->rtram_ctrl, DCSS_DPR_RTRAM_CTRL0);
}
if (ch->sys_ctrl != sys_ctrl)
ch->sys_ctrl_chgd = true;
ch->sys_ctrl = sys_ctrl;
}
struct rgb_comp_sel {
u32 drm_format;
int a_sel;
int r_sel;
int g_sel;
int b_sel;
};
static struct rgb_comp_sel comp_sel_map[] = {
{DRM_FORMAT_ARGB8888, 3, 2, 1, 0},
{DRM_FORMAT_XRGB8888, 3, 2, 1, 0},
{DRM_FORMAT_ABGR8888, 3, 0, 1, 2},
{DRM_FORMAT_XBGR8888, 3, 0, 1, 2},
{DRM_FORMAT_RGBA8888, 0, 3, 2, 1},
{DRM_FORMAT_RGBX8888, 0, 3, 2, 1},
{DRM_FORMAT_BGRA8888, 0, 1, 2, 3},
{DRM_FORMAT_BGRX8888, 0, 1, 2, 3},
};
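/*
 * Reading the table: the four selectors pick which 8-bit component of the
 * 32-bit pixel feeds the A, R, G and B channels, with 0 being the least
 * significant byte.  E.g. DRM_FORMAT_ARGB8888 is stored little endian as
 * B, G, R, A, so alpha comes from component 3 and blue from component 0,
 * hence {3, 2, 1, 0}.
 */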
static int to_comp_sel(u32 pix_fmt, int *a_sel, int *r_sel, int *g_sel,
int *b_sel)
{
int i;
for (i = 0; i < ARRAY_SIZE(comp_sel_map); i++) {
if (comp_sel_map[i].drm_format == pix_fmt) {
*a_sel = comp_sel_map[i].a_sel;
*r_sel = comp_sel_map[i].r_sel;
*g_sel = comp_sel_map[i].g_sel;
*b_sel = comp_sel_map[i].b_sel;
return 0;
}
}
return -1;
}
static void dcss_dpr_rtram_set(struct dcss_dpr_ch *ch, u32 pix_format)
{
u32 val, mask;
switch (pix_format) {
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV12:
ch->rtram_3buf_en = true;
ch->rtram_4line_en = false;
break;
default:
ch->rtram_3buf_en = true;
ch->rtram_4line_en = true;
break;
}
val = (ch->rtram_4line_en ? RTR_4LINE_BUF_EN : 0);
val |= (ch->rtram_3buf_en ? RTR_3BUF_EN : 0);
mask = RTR_4LINE_BUF_EN | RTR_3BUF_EN;
ch->mode_ctrl &= ~mask;
ch->mode_ctrl |= (val & mask);
val = (ch->rtram_4line_en ? 0 : NUM_ROWS_ACTIVE);
val |= (3 << THRES_LOW_POS) & THRES_LOW_MASK;
val |= (4 << THRES_HIGH_POS) & THRES_HIGH_MASK;
mask = THRES_LOW_MASK | THRES_HIGH_MASK | NUM_ROWS_ACTIVE;
ch->rtram_ctrl &= ~mask;
ch->rtram_ctrl |= (val & mask);
}
static void dcss_dpr_setup_components(struct dcss_dpr_ch *ch,
const struct drm_format_info *format)
{
int a_sel, r_sel, g_sel, b_sel;
bool uv_swap, y_uv_swap;
switch (format->format) {
case DRM_FORMAT_YVYU:
uv_swap = true;
y_uv_swap = true;
break;
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV21:
uv_swap = true;
y_uv_swap = false;
break;
case DRM_FORMAT_YUYV:
uv_swap = false;
y_uv_swap = true;
break;
default:
uv_swap = false;
y_uv_swap = false;
break;
}
dcss_dpr_uv_swap(ch, uv_swap);
dcss_dpr_y_uv_swap(ch, y_uv_swap);
if (!format->is_yuv) {
if (!to_comp_sel(format->format, &a_sel, &r_sel,
&g_sel, &b_sel)) {
dcss_dpr_argb_comp_sel(ch, a_sel, r_sel, g_sel, b_sel);
} else {
dcss_dpr_argb_comp_sel(ch, 3, 2, 1, 0);
}
} else {
dcss_dpr_argb_comp_sel(ch, 0, 0, 0, 0);
}
}
static void dcss_dpr_tile_set(struct dcss_dpr_ch *ch, uint64_t modifier)
{
switch (ch->ch_num) {
case 0:
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
ch->tile = TILE_LINEAR;
break;
case DRM_FORMAT_MOD_VIVANTE_TILED:
ch->tile = TILE_GPU_STANDARD;
break;
case DRM_FORMAT_MOD_VIVANTE_SUPER_TILED:
ch->tile = TILE_GPU_SUPER;
break;
default:
WARN_ON(1);
break;
}
break;
case 1:
case 2:
ch->tile = TILE_LINEAR;
break;
default:
WARN_ON(1);
return;
}
ch->mode_ctrl &= ~TILE_TYPE_MASK;
ch->mode_ctrl |= ((ch->tile << TILE_TYPE_POS) & TILE_TYPE_MASK);
}
void dcss_dpr_format_set(struct dcss_dpr *dpr, int ch_num,
const struct drm_format_info *format, u64 modifier)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
ch->format = *format;
dcss_dpr_yuv_en(ch, format->is_yuv);
dcss_dpr_pix_size_set(ch, format);
dcss_dpr_setup_components(ch, format);
dcss_dpr_2plane_en(ch, format->num_planes == 2);
dcss_dpr_rtram_set(ch, format->format);
dcss_dpr_tile_set(ch, modifier);
}
/* This function will be called from interrupt context. */
void dcss_dpr_write_sysctrl(struct dcss_dpr *dpr)
{
int chnum;
dcss_ctxld_assert_locked(dpr->ctxld);
for (chnum = 0; chnum < 3; chnum++) {
struct dcss_dpr_ch *ch = &dpr->ch[chnum];
if (ch->sys_ctrl_chgd) {
dcss_ctxld_write_irqsafe(dpr->ctxld, dpr->ctx_id,
ch->sys_ctrl,
ch->base_ofs +
DCSS_DPR_SYSTEM_CTRL0);
ch->sys_ctrl_chgd = false;
}
}
}
void dcss_dpr_set_rotation(struct dcss_dpr *dpr, int ch_num, u32 rotation)
{
struct dcss_dpr_ch *ch = &dpr->ch[ch_num];
ch->frame_ctrl &= ~(HFLIP_EN | VFLIP_EN | ROT_ENC_MASK);
ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_X ? HFLIP_EN : 0;
ch->frame_ctrl |= rotation & DRM_MODE_REFLECT_Y ? VFLIP_EN : 0;
if (rotation & DRM_MODE_ROTATE_90)
ch->frame_ctrl |= 1 << ROT_ENC_POS;
else if (rotation & DRM_MODE_ROTATE_180)
ch->frame_ctrl |= 2 << ROT_ENC_POS;
else if (rotation & DRM_MODE_ROTATE_270)
ch->frame_ctrl |= 3 << ROT_ENC_POS;
}
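/*
 * Illustrative encoding, per the bitfields above: DRM_MODE_ROTATE_90 |
 * DRM_MODE_REFLECT_X ends up as HFLIP_EN | (1 << ROT_ENC_POS), i.e. the
 * 2-bit ROT_ENC field counts 90-degree steps (0..3) independently of the
 * flip bits.
 */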


@ -0,0 +1,138 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <drm/drm_of.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
struct dcss_drv {
struct dcss_dev *dcss;
struct dcss_kms_dev *kms;
};
struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev)
{
struct dcss_drv *mdrv = dev_get_drvdata(dev);
return mdrv ? mdrv->dcss : NULL;
}
struct drm_device *dcss_drv_dev_to_drm(struct device *dev)
{
struct dcss_drv *mdrv = dev_get_drvdata(dev);
return mdrv ? &mdrv->kms->base : NULL;
}
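/*
 * Both accessors tolerate a NULL drvdata so that PM callbacks and IRQ paths
 * can run safely before probe has finished wiring things up; see, e.g.,
 * dcss_ctxld_irq_handler() and dcss_ctxld_enable_locked() checking for a
 * NULL dcss.
 */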
static int dcss_drv_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *remote;
struct dcss_drv *mdrv;
int err = 0;
bool hdmi_output = true;
if (!dev->of_node)
return -ENODEV;
remote = of_graph_get_remote_node(dev->of_node, 0, 0);
if (!remote)
return -ENODEV;
hdmi_output = !of_device_is_compatible(remote, "fsl,imx8mq-nwl-dsi");
of_node_put(remote);
mdrv = kzalloc(sizeof(*mdrv), GFP_KERNEL);
if (!mdrv)
return -ENOMEM;
mdrv->dcss = dcss_dev_create(dev, hdmi_output);
if (IS_ERR(mdrv->dcss)) {
err = PTR_ERR(mdrv->dcss);
goto err;
}
dev_set_drvdata(dev, mdrv);
mdrv->kms = dcss_kms_attach(mdrv->dcss);
if (IS_ERR(mdrv->kms)) {
err = PTR_ERR(mdrv->kms);
goto dcss_shutoff;
}
return 0;
dcss_shutoff:
dcss_dev_destroy(mdrv->dcss);
dev_set_drvdata(dev, NULL);
err:
kfree(mdrv);
return err;
}
static int dcss_drv_platform_remove(struct platform_device *pdev)
{
struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
if (!mdrv)
return 0;
dcss_kms_detach(mdrv->kms);
dcss_dev_destroy(mdrv->dcss);
dev_set_drvdata(&pdev->dev, NULL);
kfree(mdrv);
return 0;
}
static struct dcss_type_data dcss_types[] = {
[DCSS_IMX8MQ] = {
.name = "DCSS_IMX8MQ",
.blkctl_ofs = 0x2F000,
.ctxld_ofs = 0x23000,
.dtg_ofs = 0x20000,
.scaler_ofs = 0x1C000,
.ss_ofs = 0x1B000,
.dpr_ofs = 0x18000,
},
};
static const struct of_device_id dcss_of_match[] = {
{ .compatible = "nxp,imx8mq-dcss", .data = &dcss_types[DCSS_IMX8MQ], },
{},
};
MODULE_DEVICE_TABLE(of, dcss_of_match);
static const struct dev_pm_ops dcss_dev_pm = {
SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend,
dcss_dev_runtime_resume, NULL)
};
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
.remove = dcss_drv_platform_remove,
.driver = {
.name = "imx-dcss",
.of_match_table = dcss_of_match,
.pm = &dcss_dev_pm,
},
};
module_platform_driver(dcss_platform_driver);
MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@nxp.com>");
MODULE_DESCRIPTION("DCSS driver for i.MX8MQ");
MODULE_LICENSE("GPL v2");


@ -0,0 +1,409 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include "dcss-dev.h"
#define DCSS_DTG_TC_CONTROL_STATUS 0x00
#define CH3_EN BIT(0)
#define CH2_EN BIT(1)
#define CH1_EN BIT(2)
#define OVL_DATA_MODE BIT(3)
#define BLENDER_VIDEO_ALPHA_SEL BIT(7)
#define DTG_START BIT(8)
#define DBY_MODE_EN BIT(9)
#define CH1_ALPHA_SEL BIT(10)
#define CSS_PIX_COMP_SWAP_POS 12
#define CSS_PIX_COMP_SWAP_MASK GENMASK(14, 12)
#define DEFAULT_FG_ALPHA_POS 24
#define DEFAULT_FG_ALPHA_MASK GENMASK(31, 24)
#define DCSS_DTG_TC_DTG 0x04
#define DCSS_DTG_TC_DISP_TOP 0x08
#define DCSS_DTG_TC_DISP_BOT 0x0C
#define DCSS_DTG_TC_CH1_TOP 0x10
#define DCSS_DTG_TC_CH1_BOT 0x14
#define DCSS_DTG_TC_CH2_TOP 0x18
#define DCSS_DTG_TC_CH2_BOT 0x1C
#define DCSS_DTG_TC_CH3_TOP 0x20
#define DCSS_DTG_TC_CH3_BOT 0x24
#define TC_X_POS 0
#define TC_X_MASK GENMASK(12, 0)
#define TC_Y_POS 16
#define TC_Y_MASK GENMASK(28, 16)
#define DCSS_DTG_TC_CTXLD 0x28
#define TC_CTXLD_DB_Y_POS 0
#define TC_CTXLD_DB_Y_MASK GENMASK(12, 0)
#define TC_CTXLD_SB_Y_POS 16
#define TC_CTXLD_SB_Y_MASK GENMASK(28, 16)
#define DCSS_DTG_TC_CH1_BKRND 0x2C
#define DCSS_DTG_TC_CH2_BKRND 0x30
#define BKRND_R_Y_COMP_POS 20
#define BKRND_R_Y_COMP_MASK GENMASK(29, 20)
#define BKRND_G_U_COMP_POS 10
#define BKRND_G_U_COMP_MASK GENMASK(19, 10)
#define BKRND_B_V_COMP_POS 0
#define BKRND_B_V_COMP_MASK GENMASK(9, 0)
#define DCSS_DTG_BLENDER_DBY_RANGEINV 0x38
#define DCSS_DTG_BLENDER_DBY_RANGEMIN 0x3C
#define DCSS_DTG_BLENDER_DBY_BDP 0x40
#define DCSS_DTG_BLENDER_BKRND_I 0x44
#define DCSS_DTG_BLENDER_BKRND_P 0x48
#define DCSS_DTG_BLENDER_BKRND_T 0x4C
#define DCSS_DTG_LINE0_INT 0x50
#define DCSS_DTG_LINE1_INT 0x54
#define DCSS_DTG_BG_ALPHA_DEFAULT 0x58
#define DCSS_DTG_INT_STATUS 0x5C
#define DCSS_DTG_INT_CONTROL 0x60
#define DCSS_DTG_TC_CH3_BKRND 0x64
#define DCSS_DTG_INT_MASK 0x68
#define LINE0_IRQ BIT(0)
#define LINE1_IRQ BIT(1)
#define LINE2_IRQ BIT(2)
#define LINE3_IRQ BIT(3)
#define DCSS_DTG_LINE2_INT 0x6C
#define DCSS_DTG_LINE3_INT 0x70
#define DCSS_DTG_DBY_OL 0x74
#define DCSS_DTG_DBY_BL 0x78
#define DCSS_DTG_DBY_EL 0x7C
struct dcss_dtg {
struct device *dev;
struct dcss_ctxld *ctxld;
void __iomem *base_reg;
u32 base_ofs;
u32 ctx_id;
bool in_use;
u32 dis_ulc_x;
u32 dis_ulc_y;
u32 control_status;
u32 alpha;
u32 alpha_cfg;
int ctxld_kick_irq;
bool ctxld_kick_irq_en;
};
static void dcss_dtg_write(struct dcss_dtg *dtg, u32 val, u32 ofs)
{
if (!dtg->in_use)
dcss_writel(val, dtg->base_reg + ofs);
dcss_ctxld_write(dtg->ctxld, dtg->ctx_id,
val, dtg->base_ofs + ofs);
}
static irqreturn_t dcss_dtg_irq_handler(int irq, void *data)
{
struct dcss_dtg *dtg = data;
u32 status;
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
if (!(status & LINE0_IRQ))
return IRQ_NONE;
dcss_ctxld_kick(dtg->ctxld);
dcss_writel(status & LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
return IRQ_HANDLED;
}
static int dcss_dtg_irq_config(struct dcss_dtg *dtg,
struct platform_device *pdev)
{
int ret;
dtg->ctxld_kick_irq = platform_get_irq_byname(pdev, "ctxld_kick");
if (dtg->ctxld_kick_irq < 0)
return dtg->ctxld_kick_irq;
dcss_update(0, LINE0_IRQ | LINE1_IRQ,
dtg->base_reg + DCSS_DTG_INT_MASK);
ret = request_irq(dtg->ctxld_kick_irq, dcss_dtg_irq_handler,
0, "dcss_ctxld_kick", dtg);
if (ret) {
dev_err(dtg->dev, "dtg: irq request failed.\n");
return ret;
}
disable_irq(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = false;
return 0;
}
int dcss_dtg_init(struct dcss_dev *dcss, unsigned long dtg_base)
{
int ret = 0;
struct dcss_dtg *dtg;
dtg = kzalloc(sizeof(*dtg), GFP_KERNEL);
if (!dtg)
return -ENOMEM;
dcss->dtg = dtg;
dtg->dev = dcss->dev;
dtg->ctxld = dcss->ctxld;
dtg->base_reg = ioremap(dtg_base, SZ_4K);
if (!dtg->base_reg) {
dev_err(dcss->dev, "dtg: unable to remap dtg base\n");
ret = -ENOMEM;
goto err_ioremap;
}
dtg->base_ofs = dtg_base;
dtg->ctx_id = CTX_DB;
dtg->alpha = 255;
dtg->control_status |= OVL_DATA_MODE | BLENDER_VIDEO_ALPHA_SEL |
((dtg->alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK);
ret = dcss_dtg_irq_config(dtg, to_platform_device(dcss->dev));
if (ret)
goto err_irq;
return 0;
err_irq:
iounmap(dtg->base_reg);
err_ioremap:
kfree(dtg);
return ret;
}
void dcss_dtg_exit(struct dcss_dtg *dtg)
{
free_irq(dtg->ctxld_kick_irq, dtg);
if (dtg->base_reg)
iounmap(dtg->base_reg);
kfree(dtg);
}
void dcss_dtg_sync_set(struct dcss_dtg *dtg, struct videomode *vm)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dtg->dev);
u16 dtg_lrc_x, dtg_lrc_y;
u16 dis_ulc_x, dis_ulc_y;
u16 dis_lrc_x, dis_lrc_y;
u32 sb_ctxld_trig, db_ctxld_trig;
u32 pixclock = vm->pixelclock;
u32 actual_clk;
dtg_lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
dtg_lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
vm->vactive - 1;
dis_ulc_x = vm->hsync_len + vm->hback_porch - 1;
dis_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch - 1;
dis_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
dis_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
vm->vactive - 1;
clk_disable_unprepare(dcss->pix_clk);
clk_set_rate(dcss->pix_clk, vm->pixelclock);
clk_prepare_enable(dcss->pix_clk);
actual_clk = clk_get_rate(dcss->pix_clk);
if (pixclock != actual_clk) {
dev_info(dtg->dev,
"Pixel clock set to %u kHz instead of %u kHz.\n",
(actual_clk / 1000), (pixclock / 1000));
}
dcss_dtg_write(dtg, ((dtg_lrc_y << TC_Y_POS) | dtg_lrc_x),
DCSS_DTG_TC_DTG);
dcss_dtg_write(dtg, ((dis_ulc_y << TC_Y_POS) | dis_ulc_x),
DCSS_DTG_TC_DISP_TOP);
dcss_dtg_write(dtg, ((dis_lrc_y << TC_Y_POS) | dis_lrc_x),
DCSS_DTG_TC_DISP_BOT);
dtg->dis_ulc_x = dis_ulc_x;
dtg->dis_ulc_y = dis_ulc_y;
sb_ctxld_trig = ((0 * dis_lrc_y / 100) << TC_CTXLD_SB_Y_POS) &
TC_CTXLD_SB_Y_MASK;
db_ctxld_trig = ((99 * dis_lrc_y / 100) << TC_CTXLD_DB_Y_POS) &
TC_CTXLD_DB_Y_MASK;
dcss_dtg_write(dtg, sb_ctxld_trig | db_ctxld_trig, DCSS_DTG_TC_CTXLD);
/* vblank trigger */
dcss_dtg_write(dtg, 0, DCSS_DTG_LINE1_INT);
/* CTXLD trigger */
dcss_dtg_write(dtg, ((90 * dis_lrc_y) / 100) << 16, DCSS_DTG_LINE0_INT);
}
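/*
 * Worked example for a CEA 1920x1080@60 mode (hsync 44, hback 148,
 * hfront 88; vsync 5, vback 36, vfront 4): dtg_lrc_x = 2199,
 * dtg_lrc_y = 1124, the active area starts at (dis_ulc_x, dis_ulc_y) =
 * (191, 44) and ends at (2111, 1124).  The CTXLD kick (LINE0) then fires
 * at line 90 * 1124 / 100 = 1011, near the end of the active area.
 */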
void dcss_dtg_plane_pos_set(struct dcss_dtg *dtg, int ch_num,
int px, int py, int pw, int ph)
{
u16 p_ulc_x, p_ulc_y;
u16 p_lrc_x, p_lrc_y;
p_ulc_x = dtg->dis_ulc_x + px;
p_ulc_y = dtg->dis_ulc_y + py;
p_lrc_x = p_ulc_x + pw;
p_lrc_y = p_ulc_y + ph;
if (!px && !py && !pw && !ph) {
dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
dcss_dtg_write(dtg, 0, DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
} else {
dcss_dtg_write(dtg, ((p_ulc_y << TC_Y_POS) | p_ulc_x),
DCSS_DTG_TC_CH1_TOP + 0x8 * ch_num);
dcss_dtg_write(dtg, ((p_lrc_y << TC_Y_POS) | p_lrc_x),
DCSS_DTG_TC_CH1_BOT + 0x8 * ch_num);
}
}
bool dcss_dtg_global_alpha_changed(struct dcss_dtg *dtg, int ch_num, int alpha)
{
if (ch_num)
return false;
return alpha != dtg->alpha;
}
void dcss_dtg_plane_alpha_set(struct dcss_dtg *dtg, int ch_num,
const struct drm_format_info *format, int alpha)
{
/* alpha is only relevant for channel 0 */
if (ch_num)
return;
/*
* Use global alpha if pixel format does not have alpha channel or the
* user explicitly chose to use global alpha (i.e. alpha is not OPAQUE).
*/
if (!format->has_alpha || alpha != 255)
dtg->alpha_cfg = (alpha << DEFAULT_FG_ALPHA_POS) & DEFAULT_FG_ALPHA_MASK;
else /* use per-pixel alpha otherwise */
dtg->alpha_cfg = CH1_ALPHA_SEL;
dtg->alpha = alpha;
}
void dcss_dtg_css_set(struct dcss_dtg *dtg)
{
dtg->control_status |=
(0x5 << CSS_PIX_COMP_SWAP_POS) & CSS_PIX_COMP_SWAP_MASK;
}
void dcss_dtg_enable(struct dcss_dtg *dtg)
{
dtg->control_status |= DTG_START;
dtg->control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
dtg->control_status |= dtg->alpha_cfg;
dcss_dtg_write(dtg, dtg->control_status, DCSS_DTG_TC_CONTROL_STATUS);
dtg->in_use = true;
}
void dcss_dtg_shutoff(struct dcss_dtg *dtg)
{
dtg->control_status &= ~DTG_START;
dcss_writel(dtg->control_status,
dtg->base_reg + DCSS_DTG_TC_CONTROL_STATUS);
dtg->in_use = false;
}
bool dcss_dtg_is_enabled(struct dcss_dtg *dtg)
{
return dtg->in_use;
}
void dcss_dtg_ch_enable(struct dcss_dtg *dtg, int ch_num, bool en)
{
u32 ch_en_map[] = {CH1_EN, CH2_EN, CH3_EN};
u32 control_status;
control_status = dtg->control_status & ~ch_en_map[ch_num];
control_status |= en ? ch_en_map[ch_num] : 0;
control_status &= ~(CH1_ALPHA_SEL | DEFAULT_FG_ALPHA_MASK);
control_status |= dtg->alpha_cfg;
if (dtg->control_status != control_status)
dcss_dtg_write(dtg, control_status, DCSS_DTG_TC_CONTROL_STATUS);
dtg->control_status = control_status;
}
void dcss_dtg_vblank_irq_enable(struct dcss_dtg *dtg, bool en)
{
u32 status;
u32 mask = en ? LINE1_IRQ : 0;
if (en) {
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
dcss_writel(status & LINE1_IRQ,
dtg->base_reg + DCSS_DTG_INT_CONTROL);
}
dcss_update(mask, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
}
void dcss_dtg_ctxld_kick_irq_enable(struct dcss_dtg *dtg, bool en)
{
u32 status;
u32 mask = en ? LINE0_IRQ : 0;
if (en) {
status = dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS);
if (!dtg->ctxld_kick_irq_en) {
dcss_writel(status & LINE0_IRQ,
dtg->base_reg + DCSS_DTG_INT_CONTROL);
enable_irq(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = true;
dcss_update(mask, LINE0_IRQ,
dtg->base_reg + DCSS_DTG_INT_MASK);
}
return;
}
if (!dtg->ctxld_kick_irq_en)
return;
disable_irq_nosync(dtg->ctxld_kick_irq);
dtg->ctxld_kick_irq_en = false;
dcss_update(mask, LINE0_IRQ, dtg->base_reg + DCSS_DTG_INT_MASK);
}
void dcss_dtg_vblank_irq_clear(struct dcss_dtg *dtg)
{
dcss_update(LINE1_IRQ, LINE1_IRQ, dtg->base_reg + DCSS_DTG_INT_CONTROL);
}
bool dcss_dtg_vblank_irq_valid(struct dcss_dtg *dtg)
{
return !!(dcss_readl(dtg->base_reg + DCSS_DTG_INT_STATUS) & LINE1_IRQ);
}


@ -0,0 +1,198 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
DEFINE_DRM_GEM_CMA_FOPS(dcss_cma_fops);
static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
.date = "20190917",
.major = 1,
.minor = 0,
.patchlevel = 0,
};
static const struct drm_mode_config_helper_funcs dcss_mode_config_helpers = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms)
{
struct drm_mode_config *config = &kms->base.mode_config;
drm_mode_config_init(&kms->base);
config->min_width = 1;
config->min_height = 1;
config->max_width = 4096;
config->max_height = 4096;
config->allow_fb_modifiers = true;
config->normalize_zpos = true;
config->funcs = &dcss_drm_mode_config_funcs;
config->helper_private = &dcss_mode_config_helpers;
}
static const struct drm_encoder_funcs dcss_kms_simple_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
static int dcss_kms_bridge_connector_init(struct dcss_kms_dev *kms)
{
struct drm_device *ddev = &kms->base;
struct drm_encoder *encoder = &kms->encoder;
struct drm_crtc *crtc = (struct drm_crtc *)&kms->crtc;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
ret = drm_of_find_panel_or_bridge(ddev->dev->of_node, 0, 0,
&panel, &bridge);
if (ret)
return ret;
if (!bridge) {
dev_err(ddev->dev, "No bridge found %d.\n", ret);
return -ENODEV;
}
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_encoder_init(&kms->base, encoder,
&dcss_kms_simple_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret) {
dev_err(ddev->dev, "Failed initializing encoder %d.\n", ret);
return ret;
}
ret = drm_bridge_attach(encoder, bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret < 0) {
dev_err(ddev->dev, "Unable to attach bridge %pOF\n",
bridge->of_node);
return ret;
}
kms->connector = drm_bridge_connector_init(ddev, encoder);
if (IS_ERR(kms->connector)) {
dev_err(ddev->dev, "Unable to create bridge connector.\n");
return PTR_ERR(kms->connector);
}
drm_connector_attach_encoder(kms->connector, encoder);
return 0;
}
struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
{
struct dcss_kms_dev *kms;
struct drm_device *drm;
struct dcss_crtc *crtc;
int ret;
kms = devm_drm_dev_alloc(dcss->dev, &dcss_kms_driver,
struct dcss_kms_dev, base);
if (IS_ERR(kms))
return kms;
drm = &kms->base;
crtc = &kms->crtc;
drm->dev_private = dcss;
dcss_kms_mode_config_init(kms);
ret = drm_vblank_init(drm, 1);
if (ret)
goto cleanup_mode_config;
drm->irq_enabled = true;
ret = dcss_kms_bridge_connector_init(kms);
if (ret)
goto cleanup_mode_config;
ret = dcss_crtc_init(crtc, drm);
if (ret)
goto cleanup_mode_config;
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
drm_bridge_connector_enable_hpd(kms->connector);
ret = drm_dev_register(drm, 0);
if (ret)
goto cleanup_crtc;
drm_fbdev_generic_setup(drm, 32);
return kms;
cleanup_crtc:
drm_bridge_connector_disable_hpd(kms->connector);
drm_kms_helper_poll_fini(drm);
dcss_crtc_deinit(crtc, drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
drm->dev_private = NULL;
return ERR_PTR(ret);
}
void dcss_kms_detach(struct dcss_kms_dev *kms)
{
struct drm_device *drm = &kms->base;
drm_dev_unregister(drm);
drm_bridge_connector_disable_hpd(kms->connector);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_crtc_vblank_off(&kms->crtc.base);
drm->irq_enabled = false;
drm_mode_config_cleanup(drm);
dcss_crtc_deinit(&kms->crtc, drm);
drm->dev_private = NULL;
}


@ -0,0 +1,44 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2019 NXP.
*/
#ifndef _DCSS_KMS_H_
#define _DCSS_KMS_H_
#include <drm/drm_encoder.h>
struct dcss_plane {
struct drm_plane base;
int ch_num;
};
struct dcss_crtc {
struct drm_crtc base;
struct drm_crtc_state *state;
struct dcss_plane *plane[3];
int irq;
bool disable_ctxld_kick_irq;
};
struct dcss_kms_dev {
struct drm_device base;
struct dcss_crtc crtc;
struct drm_encoder encoder;
struct drm_connector *connector;
};
struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss);
void dcss_kms_detach(struct dcss_kms_dev *kms);
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm);
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm);
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
unsigned int possible_crtcs,
enum drm_plane_type type,
unsigned int zpos);
#endif


@ -0,0 +1,405 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "dcss-dev.h"
#include "dcss-kms.h"
static const u32 dcss_common_formats[] = {
/* RGB */
DRM_FORMAT_ARGB8888,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_XRGB2101010,
DRM_FORMAT_XBGR2101010,
DRM_FORMAT_RGBX1010102,
DRM_FORMAT_BGRX1010102,
DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_RGBA1010102,
DRM_FORMAT_BGRA1010102,
};
static const u64 dcss_video_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static const u64 dcss_graphics_format_modifiers[] = {
DRM_FORMAT_MOD_VIVANTE_TILED,
DRM_FORMAT_MOD_VIVANTE_SUPER_TILED,
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID,
};
static inline struct dcss_plane *to_dcss_plane(struct drm_plane *p)
{
return container_of(p, struct dcss_plane, base);
}
static inline bool dcss_plane_fb_is_linear(const struct drm_framebuffer *fb)
{
return ((fb->flags & DRM_MODE_FB_MODIFIERS) == 0) ||
((fb->flags & DRM_MODE_FB_MODIFIERS) != 0 &&
fb->modifier == DRM_FORMAT_MOD_LINEAR);
}
static void dcss_plane_destroy(struct drm_plane *plane)
{
struct dcss_plane *dcss_plane = container_of(plane, struct dcss_plane,
base);
drm_plane_cleanup(plane);
kfree(dcss_plane);
}
static bool dcss_plane_format_mod_supported(struct drm_plane *plane,
u32 format,
u64 modifier)
{
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
switch (format) {
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB2101010:
return modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED;
default:
return modifier == DRM_FORMAT_MOD_LINEAR;
}
break;
case DRM_PLANE_TYPE_OVERLAY:
return modifier == DRM_FORMAT_MOD_LINEAR;
default:
return false;
}
}
static const struct drm_plane_funcs dcss_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = dcss_plane_destroy,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.format_mod_supported = dcss_plane_format_mod_supported,
};
static bool dcss_plane_can_rotate(const struct drm_format_info *format,
bool mod_present, u64 modifier,
unsigned int rotation)
{
bool linear_format = !mod_present ||
(mod_present && modifier == DRM_FORMAT_MOD_LINEAR);
u32 supported_rotation = DRM_MODE_ROTATE_0;
if (!format->is_yuv && linear_format)
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
else if (!format->is_yuv &&
modifier == DRM_FORMAT_MOD_VIVANTE_TILED)
supported_rotation = DRM_MODE_ROTATE_MASK |
DRM_MODE_REFLECT_MASK;
else if (format->is_yuv && linear_format &&
(format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21))
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
return !!(rotation & supported_rotation);
}
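/*
* Minimum source widths below follow from chroma subsampling: 4:2:0
* (NV12/NV21) needs at least 64 luma pixels per line, packed 4:2:2 formats
* at least 32; everything else must be at least 16x8.
*/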
static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt)
{
if (src_w < 64 &&
(pix_fmt == DRM_FORMAT_NV12 || pix_fmt == DRM_FORMAT_NV21))
return false;
else if (src_w < 32 &&
(pix_fmt == DRM_FORMAT_UYVY || pix_fmt == DRM_FORMAT_VYUY ||
pix_fmt == DRM_FORMAT_YUYV || pix_fmt == DRM_FORMAT_YVYU))
return false;
return src_w >= 16 && src_h >= 8;
}
static int dcss_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
struct drm_gem_cma_object *cma_obj;
struct drm_crtc_state *crtc_state;
int hdisplay, vdisplay;
int min, max;
int ret;
if (!fb || !state->crtc)
return 0;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
WARN_ON(!cma_obj);
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
hdisplay = crtc_state->adjusted_mode.hdisplay;
vdisplay = crtc_state->adjusted_mode.vdisplay;
if (!dcss_plane_is_source_size_allowed(state->src_w >> 16,
state->src_h >> 16,
fb->format->format)) {
DRM_DEBUG_KMS("Source plane size is not allowed!\n");
return -EINVAL;
}
dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num,
&min, &max);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
min, max, !is_primary_plane,
false);
if (ret)
return ret;
if (!state->visible)
return 0;
if (!dcss_plane_can_rotate(fb->format,
!!(fb->flags & DRM_MODE_FB_MODIFIERS),
fb->modifier,
state->rotation)) {
DRM_DEBUG_KMS("requested rotation is not allowed!\n");
return -EINVAL;
}
if ((state->crtc_x < 0 || state->crtc_y < 0 ||
state->crtc_x + state->crtc_w > hdisplay ||
state->crtc_y + state->crtc_h > vdisplay) &&
!dcss_plane_fb_is_linear(fb)) {
DRM_DEBUG_KMS("requested cropping operation is not allowed!\n");
return -EINVAL;
}
if ((fb->flags & DRM_MODE_FB_MODIFIERS) &&
!plane->funcs->format_mod_supported(plane,
fb->format->format,
fb->modifier)) {
DRM_DEBUG_KMS("Invalid modifier: %llx", fb->modifier);
return -EINVAL;
}
return 0;
}
static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
{
struct drm_plane *plane = &dcss_plane->base;
struct drm_plane_state *state = plane->state;
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
const struct drm_format_info *format = fb->format;
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
unsigned long p1_ba = 0, p2_ba = 0;
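/*
* src coordinates are 16.16 fixed point: >> 16 gives whole pixels, while
* >> 17 gives the 2x-subsampled coordinate used for chroma planes and
* packed 4:2:2 macropixel alignment.
*/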
if (!format->is_yuv ||
format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
p1_ba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * (state->src.y1 >> 16) +
format->char_per_block[0] * (state->src.x1 >> 16);
else if (format->format == DRM_FORMAT_UYVY ||
format->format == DRM_FORMAT_VYUY ||
format->format == DRM_FORMAT_YUYV ||
format->format == DRM_FORMAT_YVYU)
p1_ba = cma_obj->paddr + fb->offsets[0] +
fb->pitches[0] * (state->src.y1 >> 16) +
2 * format->char_per_block[0] * (state->src.x1 >> 17);
if (format->format == DRM_FORMAT_NV12 ||
format->format == DRM_FORMAT_NV21)
p2_ba = cma_obj->paddr + fb->offsets[1] +
(((fb->pitches[1] >> 1) * (state->src.y1 >> 17) +
(state->src.x1 >> 17)) << 1);
dcss_dpr_addr_set(dcss->dpr, dcss_plane->ch_num, p1_ba, p2_ba,
fb->pitches[0]);
}
static bool dcss_plane_needs_setup(struct drm_plane_state *state,
struct drm_plane_state *old_state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
return state->crtc_x != old_state->crtc_x ||
state->crtc_y != old_state->crtc_y ||
state->crtc_w != old_state->crtc_w ||
state->crtc_h != old_state->crtc_h ||
state->src_x != old_state->src_x ||
state->src_y != old_state->src_y ||
state->src_w != old_state->src_w ||
state->src_h != old_state->src_h ||
fb->format->format != old_fb->format->format ||
fb->modifier != old_fb->modifier ||
state->rotation != old_state->rotation;
}
static void dcss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = plane->state;
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
u32 pixel_format;
struct drm_crtc_state *crtc_state;
bool modifiers_present;
u32 src_w, src_h, dst_w, dst_h;
struct drm_rect src, dst;
bool enable = true;
if (!fb || !state->crtc || !state->visible)
return;
pixel_format = state->fb->format->format;
crtc_state = state->crtc->state;
modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) &&
!dcss_plane_needs_setup(state, old_state)) {
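/* fast path: only the framebuffer changed, so just reprogram the base address */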
dcss_plane_atomic_set_base(dcss_plane);
return;
}
src = plane->state->src;
dst = plane->state->dst;
/*
* The width and height after clipping.
*/
src_w = drm_rect_width(&src) >> 16;
src_h = drm_rect_height(&src) >> 16;
dst_w = drm_rect_width(&dst);
dst_h = drm_rect_height(&dst);
if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
modifiers_present && fb->modifier == DRM_FORMAT_MOD_LINEAR)
modifiers_present = false;
dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, state->fb->format,
modifiers_present ? fb->modifier :
DRM_FORMAT_MOD_LINEAR);
dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h);
dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num,
state->rotation);
dcss_plane_atomic_set_base(dcss_plane);
dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
state->fb->format, src_w, src_h,
dst_w, dst_h,
drm_mode_vrefresh(&crtc_state->mode));
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
dst.x1, dst.y1, dst_w, dst_h);
dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num,
fb->format, state->alpha >> 8);
if (!dcss_plane->ch_num && (state->alpha >> 8) == 0)
enable = false;
dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable);
dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, enable);
if (!enable)
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
0, 0, 0, 0);
dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, enable);
}
static void dcss_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, false);
dcss_scaler_ch_enable(dcss->scaler, dcss_plane->ch_num, false);
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num, 0, 0, 0, 0);
dcss_dtg_ch_enable(dcss->dtg, dcss_plane->ch_num, false);
}
static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = dcss_plane_atomic_check,
.atomic_update = dcss_plane_atomic_update,
.atomic_disable = dcss_plane_atomic_disable,
};
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
unsigned int possible_crtcs,
enum drm_plane_type type,
unsigned int zpos)
{
struct dcss_plane *dcss_plane;
const u64 *format_modifiers = dcss_video_format_modifiers;
int ret;
if (zpos > 2)
return ERR_PTR(-EINVAL);
dcss_plane = kzalloc(sizeof(*dcss_plane), GFP_KERNEL);
if (!dcss_plane) {
DRM_ERROR("failed to allocate plane\n");
return ERR_PTR(-ENOMEM);
}
if (type == DRM_PLANE_TYPE_PRIMARY)
format_modifiers = dcss_graphics_format_modifiers;
ret = drm_universal_plane_init(drm, &dcss_plane->base, possible_crtcs,
&dcss_plane_funcs, dcss_common_formats,
ARRAY_SIZE(dcss_common_formats),
format_modifiers, type, NULL);
if (ret) {
DRM_ERROR("failed to initialize plane\n");
kfree(dcss_plane);
return ERR_PTR(ret);
}
drm_plane_helper_add(&dcss_plane->base, &dcss_plane_helper_funcs);
ret = drm_plane_create_zpos_immutable_property(&dcss_plane->base, zpos);
if (ret)
return ERR_PTR(ret);
drm_plane_create_rotation_property(&dcss_plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_180 |
DRM_MODE_ROTATE_270 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y);
dcss_plane->ch_num = zpos;
return dcss_plane;
}


@ -0,0 +1,826 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*
* Scaling algorithms were contributed by Dzung Hoang <dzung.hoang@nxp.com>
*/
#include <linux/device.h>
#include <linux/slab.h>
#include "dcss-dev.h"
#define DCSS_SCALER_CTRL 0x00
#define SCALER_EN BIT(0)
#define REPEAT_EN BIT(4)
#define SCALE2MEM_EN BIT(8)
#define MEM2OFIFO_EN BIT(12)
#define DCSS_SCALER_OFIFO_CTRL 0x04
#define OFIFO_LOW_THRES_POS 0
#define OFIFO_LOW_THRES_MASK GENMASK(9, 0)
#define OFIFO_HIGH_THRES_POS 16
#define OFIFO_HIGH_THRES_MASK GENMASK(25, 16)
#define UNDERRUN_DETECT_CLR BIT(26)
#define LOW_THRES_DETECT_CLR BIT(27)
#define HIGH_THRES_DETECT_CLR BIT(28)
#define UNDERRUN_DETECT_EN BIT(29)
#define LOW_THRES_DETECT_EN BIT(30)
#define HIGH_THRES_DETECT_EN BIT(31)
#define DCSS_SCALER_SDATA_CTRL 0x08
#define YUV_EN BIT(0)
#define RTRAM_8LINES BIT(1)
#define Y_UV_BYTE_SWAP BIT(4)
#define A2R10G10B10_FORMAT_POS 8
#define A2R10G10B10_FORMAT_MASK GENMASK(11, 8)
#define DCSS_SCALER_BIT_DEPTH 0x0C
#define LUM_BIT_DEPTH_POS 0
#define LUM_BIT_DEPTH_MASK GENMASK(1, 0)
#define CHR_BIT_DEPTH_POS 4
#define CHR_BIT_DEPTH_MASK GENMASK(5, 4)
#define DCSS_SCALER_SRC_FORMAT 0x10
#define DCSS_SCALER_DST_FORMAT 0x14
#define FORMAT_MASK GENMASK(1, 0)
#define DCSS_SCALER_SRC_LUM_RES 0x18
#define DCSS_SCALER_SRC_CHR_RES 0x1C
#define DCSS_SCALER_DST_LUM_RES 0x20
#define DCSS_SCALER_DST_CHR_RES 0x24
#define WIDTH_POS 0
#define WIDTH_MASK GENMASK(11, 0)
#define HEIGHT_POS 16
#define HEIGHT_MASK GENMASK(27, 16)
#define DCSS_SCALER_V_LUM_START 0x48
#define V_START_MASK GENMASK(15, 0)
#define DCSS_SCALER_V_LUM_INC 0x4C
#define V_INC_MASK GENMASK(15, 0)
#define DCSS_SCALER_H_LUM_START 0x50
#define H_START_MASK GENMASK(18, 0)
#define DCSS_SCALER_H_LUM_INC 0x54
#define H_INC_MASK GENMASK(15, 0)
#define DCSS_SCALER_V_CHR_START 0x58
#define DCSS_SCALER_V_CHR_INC 0x5C
#define DCSS_SCALER_H_CHR_START 0x60
#define DCSS_SCALER_H_CHR_INC 0x64
#define DCSS_SCALER_COEF_VLUM 0x80
#define DCSS_SCALER_COEF_HLUM 0x140
#define DCSS_SCALER_COEF_VCHR 0x200
#define DCSS_SCALER_COEF_HCHR 0x300
struct dcss_scaler_ch {
void __iomem *base_reg;
u32 base_ofs;
struct dcss_scaler *scl;
u32 sdata_ctrl;
u32 scaler_ctrl;
bool scaler_ctrl_chgd;
u32 c_vstart;
u32 c_hstart;
};
struct dcss_scaler {
struct device *dev;
struct dcss_ctxld *ctxld;
u32 ctx_id;
struct dcss_scaler_ch ch[3];
};
/* scaler coefficients generator */
#define PSC_FRAC_BITS 30
#define PSC_FRAC_SCALE BIT(PSC_FRAC_BITS)
#define PSC_BITS_FOR_PHASE 4
#define PSC_NUM_PHASES 16
#define PSC_STORED_PHASES (PSC_NUM_PHASES / 2 + 1)
#define PSC_NUM_TAPS 7
#define PSC_NUM_TAPS_RGBA 5
#define PSC_COEFF_PRECISION 10
#define PSC_PHASE_FRACTION_BITS 13
#define PSC_PHASE_MASK (PSC_NUM_PHASES - 1)
#define PSC_Q_FRACTION 19
#define PSC_Q_ROUND_OFFSET (1 << (PSC_Q_FRACTION - 1))
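/*
* The helpers below operate in Q19 fixed point (PSC_Q_FRACTION fractional
* bits), so 1.0 is represented as 1 << 19. For example, mult_q(1 << 19,
* 1 << 18) computes 1.0 * 0.5 and returns 1 << 18, with PSC_Q_ROUND_OFFSET
* providing round-to-nearest before the final shift.
*/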
/**
* mult_q() - Performs fixed-point multiplication.
* @A: multiplier
* @B: multiplicand
*/
static int mult_q(int A, int B)
{
int result;
s64 temp;
temp = (int64_t)A * (int64_t)B;
temp += PSC_Q_ROUND_OFFSET;
result = (int)(temp >> PSC_Q_FRACTION);
return result;
}
/**
* div_q() - Performs fixed-point division.
* @A: dividend
* @B: divisor
*/
static int div_q(int A, int B)
{
int result;
s64 temp;
temp = (int64_t)A << PSC_Q_FRACTION;
if ((temp >= 0 && B >= 0) || (temp < 0 && B < 0))
temp += B / 2;
else
temp -= B / 2;
result = (int)(temp / B);
return result;
}
/**
* exp_approx_q() - Compute approximation to exp(x) function using Taylor
* series.
* @x: fixed-point argument of exp function
*/
static int exp_approx_q(int x)
{
int sum = 1 << PSC_Q_FRACTION;
int term = 1 << PSC_Q_FRACTION;
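/*
* sum = 1 + x + x^2/2! + x^3/3! + x^4/4!; each step multiplies the previous
* term by x/n, accumulating the factorial denominators.
*/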
term = mult_q(term, div_q(x, 1 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 2 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 3 << PSC_Q_FRACTION));
sum += term;
term = mult_q(term, div_q(x, 4 << PSC_Q_FRACTION));
sum += term;
return sum;
}
/**
* dcss_scaler_gaussian_filter() - Generate gaussian prototype filter.
* @fc_q: fixed-point cutoff frequency normalized to range [0, 1]
* @use_5_taps: indicates whether to use 5 taps or 7 taps
* @phase0_identity: replace the phase-0 filter with an identity filter
* @coef: output filter coefficients
*/
static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
bool phase0_identity,
int coef[][PSC_NUM_TAPS])
{
int sigma_q, g0_q, g1_q, g2_q;
int tap_cnt1, tap_cnt2, tap_idx, phase_cnt;
int mid;
int phase;
int i;
int taps;
if (use_5_taps)
for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
coef[phase][0] = 0;
coef[phase][PSC_NUM_TAPS - 1] = 0;
}
/* seed coefficient scanner */
taps = use_5_taps ? PSC_NUM_TAPS_RGBA : PSC_NUM_TAPS;
mid = (PSC_NUM_PHASES * taps) / 2 - 1;
phase_cnt = (PSC_NUM_PHASES * (PSC_NUM_TAPS + 1)) / 2;
tap_cnt1 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
tap_cnt2 = (PSC_NUM_PHASES * PSC_NUM_TAPS) / 2;
/* seed gaussian filter generator */
sigma_q = div_q(PSC_Q_ROUND_OFFSET, fc_q);
g0_q = 1 << PSC_Q_FRACTION;
g1_q = exp_approx_q(div_q(-PSC_Q_ROUND_OFFSET,
mult_q(sigma_q, sigma_q)));
g2_q = mult_q(g1_q, g1_q);
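/*
* Gaussian recurrence: each step does g0 *= g1 and g1 *= g2, so after n
* steps g0 = exp(-n^2 / (2 * sigma^2)) -- the quadratic exponent is built
* up via second differences without evaluating exp() per tap.
*/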
coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = g0_q;
for (i = 0; i < mid; i++) {
phase_cnt++;
tap_cnt1--;
tap_cnt2++;
g0_q = mult_q(g0_q, g1_q);
g1_q = mult_q(g1_q, g2_q);
if ((phase_cnt & PSC_PHASE_MASK) <= 8) {
tap_idx = tap_cnt1 >> PSC_BITS_FOR_PHASE;
coef[phase_cnt & PSC_PHASE_MASK][tap_idx] = g0_q;
}
if (((-phase_cnt) & PSC_PHASE_MASK) <= 8) {
tap_idx = tap_cnt2 >> PSC_BITS_FOR_PHASE;
coef[(-phase_cnt) & PSC_PHASE_MASK][tap_idx] = g0_q;
}
}
phase_cnt++;
tap_cnt1--;
coef[phase_cnt & PSC_PHASE_MASK][tap_cnt1 >> PSC_BITS_FOR_PHASE] = 0;
/* override phase 0 with identity filter if specified */
if (phase0_identity)
for (i = 0; i < PSC_NUM_TAPS; i++)
coef[0][i] = i == (PSC_NUM_TAPS >> 1) ?
(1 << PSC_COEFF_PRECISION) : 0;
/* normalize coef */
for (phase = 0; phase < PSC_STORED_PHASES; phase++) {
int sum = 0;
s64 ll_temp;
for (i = 0; i < PSC_NUM_TAPS; i++)
sum += coef[phase][i];
for (i = 0; i < PSC_NUM_TAPS; i++) {
ll_temp = coef[phase][i];
ll_temp <<= PSC_COEFF_PRECISION;
ll_temp += sum >> 1;
ll_temp /= sum;
coef[phase][i] = (int)ll_temp;
}
}
}
/**
* dcss_scaler_filter_design() - Compute filter coefficients using
* Gaussian filter.
* @src_length: length of input
* @dst_length: length of output
* @use_5_taps: 0 for 7 taps per phase, 1 for 5 taps
* @phase0_identity: use an identity filter for phase 0 (no resampling)
* @coef: output coefficients
*/
static void dcss_scaler_filter_design(int src_length, int dst_length,
bool use_5_taps, bool phase0_identity,
int coef[][PSC_NUM_TAPS])
{
int fc_q;
/* compute cutoff frequency */
if (dst_length >= src_length)
fc_q = div_q(1, PSC_NUM_PHASES);
else
fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES);
/* compute gaussian filter coefficients */
dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
}
static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs)
{
struct dcss_scaler *scl = ch->scl;
dcss_ctxld_write(scl->ctxld, scl->ctx_id, val, ch->base_ofs + ofs);
}
static int dcss_scaler_ch_init_all(struct dcss_scaler *scl,
unsigned long scaler_base)
{
struct dcss_scaler_ch *ch;
int i;
for (i = 0; i < 3; i++) {
ch = &scl->ch[i];
ch->base_ofs = scaler_base + i * 0x400;
ch->base_reg = ioremap(ch->base_ofs, SZ_4K);
if (!ch->base_reg) {
dev_err(scl->dev, "scaler: unable to remap ch base\n");
return -ENOMEM;
}
ch->scl = scl;
}
return 0;
}
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base)
{
struct dcss_scaler *scaler;
scaler = kzalloc(sizeof(*scaler), GFP_KERNEL);
if (!scaler)
return -ENOMEM;
dcss->scaler = scaler;
scaler->dev = dcss->dev;
scaler->ctxld = dcss->ctxld;
scaler->ctx_id = CTX_SB_HP;
if (dcss_scaler_ch_init_all(scaler, scaler_base)) {
int i;
for (i = 0; i < 3; i++) {
if (scaler->ch[i].base_reg)
iounmap(scaler->ch[i].base_reg);
}
kfree(scaler);
return -ENOMEM;
}
return 0;
}
void dcss_scaler_exit(struct dcss_scaler *scl)
{
int ch_no;
for (ch_no = 0; ch_no < 3; ch_no++) {
struct dcss_scaler_ch *ch = &scl->ch[ch_no];
dcss_writel(0, ch->base_reg + DCSS_SCALER_CTRL);
if (ch->base_reg)
iounmap(ch->base_reg);
}
kfree(scl);
}
void dcss_scaler_ch_enable(struct dcss_scaler *scl, int ch_num, bool en)
{
struct dcss_scaler_ch *ch = &scl->ch[ch_num];
u32 scaler_ctrl;
scaler_ctrl = en ? SCALER_EN | REPEAT_EN : 0;
if (en)
dcss_scaler_write(ch, ch->sdata_ctrl, DCSS_SCALER_SDATA_CTRL);
if (ch->scaler_ctrl != scaler_ctrl)
ch->scaler_ctrl_chgd = true;
ch->scaler_ctrl = scaler_ctrl;
}
static void dcss_scaler_yuv_enable(struct dcss_scaler_ch *ch, bool en)
{
ch->sdata_ctrl &= ~YUV_EN;
ch->sdata_ctrl |= en ? YUV_EN : 0;
}
static void dcss_scaler_rtr_8lines_enable(struct dcss_scaler_ch *ch, bool en)
{
ch->sdata_ctrl &= ~RTRAM_8LINES;
ch->sdata_ctrl |= en ? RTRAM_8LINES : 0;
}
static void dcss_scaler_bit_depth_set(struct dcss_scaler_ch *ch, int depth)
{
u32 val;
val = depth == 30 ? 2 : 0;
dcss_scaler_write(ch,
((val << CHR_BIT_DEPTH_POS) & CHR_BIT_DEPTH_MASK) |
((val << LUM_BIT_DEPTH_POS) & LUM_BIT_DEPTH_MASK),
DCSS_SCALER_BIT_DEPTH);
}
enum buffer_format {
BUF_FMT_YUV420,
BUF_FMT_YUV422,
BUF_FMT_ARGB8888_YUV444,
};
enum chroma_location {
PSC_LOC_HORZ_0_VERT_1_OVER_4 = 0,
PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4 = 1,
PSC_LOC_HORZ_0_VERT_0 = 2,
PSC_LOC_HORZ_1_OVER_4_VERT_0 = 3,
PSC_LOC_HORZ_0_VERT_1_OVER_2 = 4,
PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2 = 5
};
static void dcss_scaler_format_set(struct dcss_scaler_ch *ch,
enum buffer_format src_fmt,
enum buffer_format dst_fmt)
{
dcss_scaler_write(ch, src_fmt, DCSS_SCALER_SRC_FORMAT);
dcss_scaler_write(ch, dst_fmt, DCSS_SCALER_DST_FORMAT);
}
static void dcss_scaler_res_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres,
int dst_xres, int dst_yres,
u32 pix_format, enum buffer_format dst_format)
{
u32 lsrc_xres, lsrc_yres, csrc_xres, csrc_yres;
u32 ldst_xres, ldst_yres, cdst_xres, cdst_yres;
bool src_is_444 = true;
lsrc_xres = src_xres;
csrc_xres = src_xres;
lsrc_yres = src_yres;
csrc_yres = src_yres;
ldst_xres = dst_xres;
cdst_xres = dst_xres;
ldst_yres = dst_yres;
cdst_yres = dst_yres;
if (pix_format == DRM_FORMAT_UYVY || pix_format == DRM_FORMAT_VYUY ||
pix_format == DRM_FORMAT_YUYV || pix_format == DRM_FORMAT_YVYU) {
csrc_xres >>= 1;
src_is_444 = false;
} else if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21) {
csrc_xres >>= 1;
csrc_yres >>= 1;
src_is_444 = false;
}
if (dst_format == BUF_FMT_YUV422)
cdst_xres >>= 1;
/* for 4:4:4 to 4:2:2 conversion, source height should be 1 less */
if (src_is_444 && dst_format == BUF_FMT_YUV422) {
lsrc_yres--;
csrc_yres--;
}
dcss_scaler_write(ch, (((lsrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((lsrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_SRC_LUM_RES);
dcss_scaler_write(ch, (((csrc_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((csrc_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_SRC_CHR_RES);
dcss_scaler_write(ch, (((ldst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((ldst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_DST_LUM_RES);
dcss_scaler_write(ch, (((cdst_yres - 1) << HEIGHT_POS) & HEIGHT_MASK) |
(((cdst_xres - 1) << WIDTH_POS) & WIDTH_MASK),
DCSS_SCALER_DST_CHR_RES);
}
#define downscale_fp(factor, fp_pos) ((factor) << (fp_pos))
#define upscale_fp(factor, fp_pos) ((1 << (fp_pos)) / (factor))
struct dcss_scaler_factors {
int downscale;
int upscale;
};
static const struct dcss_scaler_factors dcss_scaler_factors[] = {
{3, 8}, {5, 8}, {5, 8},
};
static void dcss_scaler_fractions_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres,
int dst_xres, int dst_yres,
u32 src_format, u32 dst_format,
enum chroma_location src_chroma_loc)
{
int src_c_xres, src_c_yres, dst_c_xres, dst_c_yres;
u32 l_vinc, l_hinc, c_vinc, c_hinc;
u32 c_vstart, c_hstart;
src_c_xres = src_xres;
src_c_yres = src_yres;
dst_c_xres = dst_xres;
dst_c_yres = dst_yres;
c_vstart = 0;
c_hstart = 0;
/* adjustments for source chroma location */
if (src_format == BUF_FMT_YUV420) {
/* vertical input chroma position adjustment */
switch (src_chroma_loc) {
case PSC_LOC_HORZ_0_VERT_1_OVER_4:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
/*
* move chroma up to first luma line
* (1/4 chroma input line spacing)
*/
c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
break;
case PSC_LOC_HORZ_0_VERT_1_OVER_2:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
/*
* move chroma up to first luma line
* (1/2 chroma input line spacing)
*/
c_vstart -= (1 << (PSC_PHASE_FRACTION_BITS - 1));
break;
default:
break;
}
/* horizontal input chroma position adjustment */
switch (src_chroma_loc) {
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_4:
case PSC_LOC_HORZ_1_OVER_4_VERT_0:
case PSC_LOC_HORZ_1_OVER_4_VERT_1_OVER_2:
/* move chroma left 1/4 chroma input sample spacing */
c_hstart -= (1 << (PSC_PHASE_FRACTION_BITS - 2));
break;
default:
break;
}
}
/* adjustments to chroma resolution */
if (src_format == BUF_FMT_YUV420) {
src_c_xres >>= 1;
src_c_yres >>= 1;
} else if (src_format == BUF_FMT_YUV422) {
src_c_xres >>= 1;
}
if (dst_format == BUF_FMT_YUV422)
dst_c_xres >>= 1;
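/*
* Scan increments are computed in 3.13 fixed point (PSC_PHASE_FRACTION_BITS
* fractional bits); adding (dst >> 1) rounds the division to nearest.
*/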
l_vinc = ((src_yres << 13) + (dst_yres >> 1)) / dst_yres;
c_vinc = ((src_c_yres << 13) + (dst_c_yres >> 1)) / dst_c_yres;
l_hinc = ((src_xres << 13) + (dst_xres >> 1)) / dst_xres;
c_hinc = ((src_c_xres << 13) + (dst_c_xres >> 1)) / dst_c_xres;
/* save chroma start phase */
ch->c_vstart = c_vstart;
ch->c_hstart = c_hstart;
dcss_scaler_write(ch, 0, DCSS_SCALER_V_LUM_START);
dcss_scaler_write(ch, l_vinc, DCSS_SCALER_V_LUM_INC);
dcss_scaler_write(ch, 0, DCSS_SCALER_H_LUM_START);
dcss_scaler_write(ch, l_hinc, DCSS_SCALER_H_LUM_INC);
dcss_scaler_write(ch, c_vstart, DCSS_SCALER_V_CHR_START);
dcss_scaler_write(ch, c_vinc, DCSS_SCALER_V_CHR_INC);
dcss_scaler_write(ch, c_hstart, DCSS_SCALER_H_CHR_START);
dcss_scaler_write(ch, c_hinc, DCSS_SCALER_H_CHR_INC);
}
int dcss_scaler_get_min_max_ratios(struct dcss_scaler *scl, int ch_num,
int *min, int *max)
{
*min = upscale_fp(dcss_scaler_factors[ch_num].upscale, 16);
*max = downscale_fp(dcss_scaler_factors[ch_num].downscale, 16);
return 0;
}
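/*
* Coefficients are 12-bit values packed back-to-back across three register
* banks (base, +0x40, +0x80) per phase. Only PSC_STORED_PHASES (9) phases
* are generated; the remaining 7 are programmed by mirroring both phase and
* tap order, which is valid because the filter is symmetric.
*/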
static void dcss_scaler_program_5_coef_set(struct dcss_scaler_ch *ch,
int base_addr,
int coef[][PSC_NUM_TAPS])
{
int i, phase;
for (i = 0; i < PSC_STORED_PHASES; i++) {
dcss_scaler_write(ch, ((coef[i][1] & 0xfff) << 16 |
(coef[i][2] & 0xfff) << 4 |
(coef[i][3] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][3] & 0x0ff) << 20 |
(coef[i][4] & 0xfff) << 8 |
(coef[i][5] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][5] & 0x00f) << 24),
base_addr + 0x80 + i * sizeof(u32));
}
/* reverse both phase and tap orderings */
for (phase = (PSC_NUM_PHASES >> 1) - 1;
i < PSC_NUM_PHASES; i++, phase--) {
dcss_scaler_write(ch, ((coef[phase][5] & 0xfff) << 16 |
(coef[phase][4] & 0xfff) << 4 |
(coef[phase][3] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][3] & 0x0ff) << 20 |
(coef[phase][2] & 0xfff) << 8 |
(coef[phase][1] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][1] & 0x00f) << 24),
base_addr + 0x80 + i * sizeof(u32));
}
}
static void dcss_scaler_program_7_coef_set(struct dcss_scaler_ch *ch,
int base_addr,
int coef[][PSC_NUM_TAPS])
{
int i, phase;
for (i = 0; i < PSC_STORED_PHASES; i++) {
dcss_scaler_write(ch, ((coef[i][0] & 0xfff) << 16 |
(coef[i][1] & 0xfff) << 4 |
(coef[i][2] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][2] & 0x0ff) << 20 |
(coef[i][3] & 0xfff) << 8 |
(coef[i][4] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[i][4] & 0x00f) << 24 |
(coef[i][5] & 0xfff) << 12 |
(coef[i][6] & 0xfff)),
base_addr + 0x80 + i * sizeof(u32));
}
/* reverse both phase and tap orderings */
for (phase = (PSC_NUM_PHASES >> 1) - 1;
i < PSC_NUM_PHASES; i++, phase--) {
dcss_scaler_write(ch, ((coef[phase][6] & 0xfff) << 16 |
(coef[phase][5] & 0xfff) << 4 |
(coef[phase][4] & 0xf00) >> 8),
base_addr + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][4] & 0x0ff) << 20 |
(coef[phase][3] & 0xfff) << 8 |
(coef[phase][2] & 0xff0) >> 4),
base_addr + 0x40 + i * sizeof(u32));
dcss_scaler_write(ch, ((coef[phase][2] & 0x00f) << 24 |
(coef[phase][1] & 0xfff) << 12 |
(coef[phase][0] & 0xfff)),
base_addr + 0x80 + i * sizeof(u32));
}
}
static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
enum buffer_format src_format,
enum buffer_format dst_format,
bool use_5_taps,
int src_xres, int src_yres, int dst_xres,
int dst_yres)
{
int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
bool program_5_taps = use_5_taps ||
(dst_format == BUF_FMT_YUV422 &&
src_format == BUF_FMT_ARGB8888_YUV444);
/* horizontal luma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
src_xres == dst_xres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical luma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
src_yres == dst_yres, coef);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
else
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
/* adjust chroma resolution */
if (src_format != BUF_FMT_ARGB8888_YUV444)
src_xres >>= 1;
if (src_format == BUF_FMT_YUV420)
src_yres >>= 1;
if (dst_format != BUF_FMT_ARGB8888_YUV444)
dst_xres >>= 1;
if (dst_format == BUF_FMT_YUV420) /* should not happen */
dst_yres >>= 1;
/* horizontal chroma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
(src_xres == dst_xres) && (ch->c_hstart == 0),
coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef);
/* vertical chroma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
(src_yres == dst_yres) && (ch->c_vstart == 0),
coef);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
else
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
}
static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch,
int src_xres, int src_yres, int dst_xres,
int dst_yres)
{
int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];
/* horizontal RGB */
dcss_scaler_filter_design(src_xres, dst_xres, false,
src_xres == dst_xres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical RGB */
dcss_scaler_filter_design(src_yres, dst_yres, false,
src_yres == dst_yres, coef);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
}
static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch,
const struct drm_format_info *format)
{
u32 a2r10g10b10_format;
if (format->is_yuv)
return;
ch->sdata_ctrl &= ~A2R10G10B10_FORMAT_MASK;
if (format->depth != 30)
return;
switch (format->format) {
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_XRGB2101010:
a2r10g10b10_format = 0;
break;
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_XBGR2101010:
a2r10g10b10_format = 5;
break;
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_RGBX1010102:
a2r10g10b10_format = 6;
break;
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_BGRX1010102:
a2r10g10b10_format = 11;
break;
default:
a2r10g10b10_format = 0;
break;
}
ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS;
}
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
u32 vrefresh_hz)
{
struct dcss_scaler_ch *ch = &scl->ch[ch_num];
unsigned int pixel_depth = 0;
bool rtr_8line_en = false;
bool use_5_taps = false;
enum buffer_format src_format = BUF_FMT_ARGB8888_YUV444;
enum buffer_format dst_format = BUF_FMT_ARGB8888_YUV444;
u32 pix_format = format->format;
if (format->is_yuv) {
dcss_scaler_yuv_enable(ch, true);
if (pix_format == DRM_FORMAT_NV12 ||
pix_format == DRM_FORMAT_NV21) {
rtr_8line_en = true;
src_format = BUF_FMT_YUV420;
} else if (pix_format == DRM_FORMAT_UYVY ||
pix_format == DRM_FORMAT_VYUY ||
pix_format == DRM_FORMAT_YUYV ||
pix_format == DRM_FORMAT_YVYU) {
src_format = BUF_FMT_YUV422;
}
use_5_taps = !rtr_8line_en;
} else {
dcss_scaler_yuv_enable(ch, false);
pixel_depth = format->depth;
}
dcss_scaler_fractions_set(ch, src_xres, src_yres, dst_xres,
dst_yres, src_format, dst_format,
PSC_LOC_HORZ_0_VERT_1_OVER_4);
if (format->is_yuv)
dcss_scaler_yuv_coef_set(ch, src_format, dst_format,
use_5_taps, src_xres, src_yres,
dst_xres, dst_yres);
else
dcss_scaler_rgb_coef_set(ch, src_xres, src_yres,
dst_xres, dst_yres);
dcss_scaler_rtr_8lines_enable(ch, rtr_8line_en);
dcss_scaler_bit_depth_set(ch, pixel_depth);
dcss_scaler_set_rgb10_order(ch, format);
dcss_scaler_format_set(ch, src_format, dst_format);
dcss_scaler_res_set(ch, src_xres, src_yres, dst_xres, dst_yres,
pix_format, dst_format);
}
/* This function will be called from interrupt context. */
void dcss_scaler_write_sclctrl(struct dcss_scaler *scl)
{
int chnum;
dcss_ctxld_assert_locked(scl->ctxld);
for (chnum = 0; chnum < 3; chnum++) {
struct dcss_scaler_ch *ch = &scl->ch[chnum];
if (ch->scaler_ctrl_chgd) {
dcss_ctxld_write_irqsafe(scl->ctxld, scl->ctx_id,
ch->scaler_ctrl,
ch->base_ofs +
DCSS_SCALER_CTRL);
ch->scaler_ctrl_chgd = false;
}
}
}


@ -0,0 +1,180 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 NXP.
*/
#include <linux/device.h>
#include <linux/slab.h>
#include "dcss-dev.h"
#define DCSS_SS_SYS_CTRL 0x00
#define RUN_EN BIT(0)
#define DCSS_SS_DISPLAY 0x10
#define LRC_X_POS 0
#define LRC_X_MASK GENMASK(12, 0)
#define LRC_Y_POS 16
#define LRC_Y_MASK GENMASK(28, 16)
#define DCSS_SS_HSYNC 0x20
#define DCSS_SS_VSYNC 0x30
#define SYNC_START_POS 0
#define SYNC_START_MASK GENMASK(12, 0)
#define SYNC_END_POS 16
#define SYNC_END_MASK GENMASK(28, 16)
#define SYNC_POL BIT(31)
#define DCSS_SS_DE_ULC 0x40
#define ULC_X_POS 0
#define ULC_X_MASK GENMASK(12, 0)
#define ULC_Y_POS 16
#define ULC_Y_MASK GENMASK(28, 16)
#define ULC_POL BIT(31)
#define DCSS_SS_DE_LRC 0x50
#define DCSS_SS_MODE 0x60
#define PIPE_MODE_POS 0
#define PIPE_MODE_MASK GENMASK(1, 0)
#define DCSS_SS_COEFF 0x70
#define HORIZ_A_POS 0
#define HORIZ_A_MASK GENMASK(3, 0)
#define HORIZ_B_POS 4
#define HORIZ_B_MASK GENMASK(7, 4)
#define HORIZ_C_POS 8
#define HORIZ_C_MASK GENMASK(11, 8)
#define HORIZ_H_NORM_POS 12
#define HORIZ_H_NORM_MASK GENMASK(14, 12)
#define VERT_A_POS 16
#define VERT_A_MASK GENMASK(19, 16)
#define VERT_B_POS 20
#define VERT_B_MASK GENMASK(23, 20)
#define VERT_C_POS 24
#define VERT_C_MASK GENMASK(27, 24)
#define VERT_H_NORM_POS 28
#define VERT_H_NORM_MASK GENMASK(30, 28)
#define DCSS_SS_CLIP_CB 0x80
#define DCSS_SS_CLIP_CR 0x90
#define CLIP_MIN_POS 0
#define CLIP_MIN_MASK GENMASK(9, 0)
#define CLIP_MAX_POS 0
#define CLIP_MAX_MASK GENMASK(23, 16)
#define DCSS_SS_INTER_MODE 0xA0
#define INT_EN BIT(0)
#define VSYNC_SHIFT BIT(1)
struct dcss_ss {
struct device *dev;
void __iomem *base_reg;
u32 base_ofs;
struct dcss_ctxld *ctxld;
u32 ctx_id;
bool in_use;
};
static void dcss_ss_write(struct dcss_ss *ss, u32 val, u32 ofs)
{
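/*
* Program the register directly while the subsampler is idle; the value is
* always recorded in the context loader so it is reapplied on context load.
*/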
if (!ss->in_use)
dcss_writel(val, ss->base_reg + ofs);
dcss_ctxld_write(ss->ctxld, ss->ctx_id, val,
ss->base_ofs + ofs);
}
int dcss_ss_init(struct dcss_dev *dcss, unsigned long ss_base)
{
struct dcss_ss *ss;
ss = kzalloc(sizeof(*ss), GFP_KERNEL);
if (!ss)
return -ENOMEM;
dcss->ss = ss;
ss->dev = dcss->dev;
ss->ctxld = dcss->ctxld;
ss->base_reg = ioremap(ss_base, SZ_4K);
if (!ss->base_reg) {
dev_err(dcss->dev, "ss: unable to remap ss base\n");
kfree(ss);
return -ENOMEM;
}
ss->base_ofs = ss_base;
ss->ctx_id = CTX_SB_HP;
return 0;
}
void dcss_ss_exit(struct dcss_ss *ss)
{
/* stop SS */
dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
if (ss->base_reg)
iounmap(ss->base_reg);
kfree(ss);
}
void dcss_ss_subsam_set(struct dcss_ss *ss)
{
dcss_ss_write(ss, 0x41614161, DCSS_SS_COEFF);
dcss_ss_write(ss, 0, DCSS_SS_MODE);
dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CB);
dcss_ss_write(ss, 0x03ff0000, DCSS_SS_CLIP_CR);
}
void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
bool phsync, bool pvsync)
{
u16 lrc_x, lrc_y;
u16 hsync_start, hsync_end;
u16 vsync_start, vsync_end;
u16 de_ulc_x, de_ulc_y;
u16 de_lrc_x, de_lrc_y;
lrc_x = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
lrc_y = vm->vfront_porch + vm->vback_porch + vm->vsync_len +
vm->vactive - 1;
dcss_ss_write(ss, (lrc_y << LRC_Y_POS) | lrc_x, DCSS_SS_DISPLAY);
hsync_start = vm->hfront_porch + vm->hback_porch + vm->hsync_len +
vm->hactive - 1;
hsync_end = vm->hsync_len - 1;
dcss_ss_write(ss, (phsync ? SYNC_POL : 0) |
((u32)hsync_end << SYNC_END_POS) | hsync_start,
DCSS_SS_HSYNC);
vsync_start = vm->vfront_porch - 1;
vsync_end = vm->vfront_porch + vm->vsync_len - 1;
dcss_ss_write(ss, (pvsync ? SYNC_POL : 0) |
((u32)vsync_end << SYNC_END_POS) | vsync_start,
DCSS_SS_VSYNC);
de_ulc_x = vm->hsync_len + vm->hback_porch - 1;
de_ulc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch;
dcss_ss_write(ss, SYNC_POL | ((u32)de_ulc_y << ULC_Y_POS) | de_ulc_x,
DCSS_SS_DE_ULC);
de_lrc_x = vm->hsync_len + vm->hback_porch + vm->hactive - 1;
de_lrc_y = vm->vsync_len + vm->vfront_porch + vm->vback_porch +
vm->vactive - 1;
dcss_ss_write(ss, (de_lrc_y << LRC_Y_POS) | de_lrc_x, DCSS_SS_DE_LRC);
}
void dcss_ss_enable(struct dcss_ss *ss)
{
dcss_ss_write(ss, RUN_EN, DCSS_SS_SYS_CTRL);
ss->in_use = true;
}
void dcss_ss_shutoff(struct dcss_ss *ss)
{
dcss_writel(0, ss->base_reg + DCSS_SS_SYS_CTRL);
ss->in_use = false;
}


@ -9,6 +9,8 @@
#include <linux/component.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma-noncoherent.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@ -19,6 +21,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_fb_cma_helper.h>
@ -76,6 +79,11 @@ static const u32 ingenic_drm_primary_formats[] = {
DRM_FORMAT_XRGB8888,
};
static bool ingenic_drm_cached_gem_buf;
module_param_named(cached_gem_buffers, ingenic_drm_cached_gem_buf, bool, 0400);
MODULE_PARM_DESC(cached_gem_buffers,
"Enable fully cached GEM buffers [default=false]");
static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@ -338,6 +346,8 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
plane->state->fb->format->format != state->fb->format->format))
crtc_state->mode_changed = true;
drm_atomic_helper_check_plane_damage(state->state, state);
return 0;
}
@ -440,6 +450,38 @@ void ingenic_drm_plane_config(struct device *dev,
}
}
void ingenic_drm_sync_data(struct device *dev,
struct drm_plane_state *old_state,
struct drm_plane_state *state)
{
const struct drm_format_info *finfo = state->fb->format;
struct ingenic_drm *priv = dev_get_drvdata(dev);
struct drm_atomic_helper_damage_iter iter;
unsigned int offset, i;
struct drm_rect clip;
dma_addr_t paddr;
void *addr;
if (!ingenic_drm_cached_gem_buf)
return;
drm_atomic_helper_damage_iter_init(&iter, old_state, state);
drm_atomic_for_each_plane_damage(&iter, &clip) {
for (i = 0; i < finfo->num_planes; i++) {
paddr = drm_fb_cma_get_gem_addr(state->fb, state, i);
addr = phys_to_virt(paddr);
/* Ignore x1/x2 values, invalidate complete lines */
offset = clip.y1 * state->fb->pitches[i];
dma_cache_sync(priv->dev, addr + offset,
(clip.y2 - clip.y1) * state->fb->pitches[i],
DMA_TO_DEVICE);
}
}
}
static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *oldstate)
{
@ -450,6 +492,8 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
dma_addr_t addr;
if (state && state->fb) {
ingenic_drm_sync_data(priv->dev, oldstate, state);
addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
width = state->src_w >> 16;
height = state->src_h >> 16;
@ -605,7 +649,69 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_EOF_IRQ, 0);
}
static struct drm_framebuffer *
ingenic_drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
if (ingenic_drm_cached_gem_buf)
return drm_gem_fb_create_with_dirty(dev, file, mode_cmd);
return drm_gem_fb_create(dev, file, mode_cmd);
}
static int ingenic_drm_gem_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
struct device *dev = cma_obj->base.dev->dev;
unsigned long attrs;
int ret;
if (ingenic_drm_cached_gem_buf)
attrs = DMA_ATTR_NON_CONSISTENT;
else
attrs = DMA_ATTR_WRITE_COMBINE;
/*
* Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
ret = dma_mmap_attrs(dev, vma, cma_obj->vaddr, cma_obj->paddr,
vma->vm_end - vma->vm_start, attrs);
if (ret)
drm_gem_vm_close(vma);
return ret;
}
static int ingenic_drm_gem_cma_mmap(struct file *filp,
struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap(filp, vma);
if (ret)
return ret;
return ingenic_drm_gem_mmap(vma->vm_private_data, vma);
}
static const struct file_operations ingenic_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.compat_ioctl = drm_compat_ioctl,
.poll = drm_poll,
.read = drm_read,
.llseek = noop_llseek,
.mmap = ingenic_drm_gem_cma_mmap,
};
static struct drm_driver ingenic_drm_driver_data = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
@ -669,7 +775,7 @@ static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs =
};
static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
.fb_create = ingenic_drm_gem_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
@ -796,6 +902,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return ret;
}
drm_plane_enable_fb_damage_clips(&priv->f1);
drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
@ -821,6 +929,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return ret;
}
drm_plane_enable_fb_damage_clips(&priv->f0);
if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) {
ret = component_bind_all(dev, drm);
if (ret) {


@ -168,6 +168,10 @@ void ingenic_drm_plane_config(struct device *dev,
struct drm_plane *plane, u32 fourcc);
void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane);
void ingenic_drm_sync_data(struct device *dev,
struct drm_plane_state *old_state,
struct drm_plane_state *state);
extern struct platform_driver *ingenic_ipu_driver_ptr;
#endif /* DRIVERS_GPU_DRM_INGENIC_INGENIC_DRM_H */


@ -20,6 +20,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
@ -316,6 +317,8 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
JZ_IPU_CTRL_CHIP_EN | JZ_IPU_CTRL_LCDC_SEL);
}
ingenic_drm_sync_data(ipu->master, oldstate, state);
/* New addresses will be committed in vblank handler... */
ipu->addr_y = drm_fb_cma_get_gem_addr(state->fb, state, 0);
if (finfo->num_planes > 1)
@ -534,7 +537,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
if (!state->crtc ||
!crtc_state->mode.hdisplay || !crtc_state->mode.vdisplay)
goto out_check_damage;
/* Plane must be fully visible */
if (state->crtc_x < 0 || state->crtc_y < 0 ||
@ -551,7 +554,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
if (!osd_changed(state, plane->state))
goto out_check_damage;
crtc_state->mode_changed = true;
@ -578,6 +581,9 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
ipu->denom_w = denom_w;
ipu->denom_h = denom_h;
out_check_damage:
drm_atomic_helper_check_plane_damage(state->state, state);
return 0;
}
@ -759,6 +765,8 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
return err;
}
drm_plane_enable_fb_damage_clips(plane);
/*
* Sharpness settings range is [0,32]
* 0 : nearest-neighbor


@ -123,7 +123,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
msm_obj->pages = p;
msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);


@ -19,7 +19,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
return NULL;
return drm_prime_pages_to_sg(obj->dev, msm_obj->pages, npages);
}
void *msm_gem_prime_vmap(struct drm_gem_object *obj)


@ -21,6 +21,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
@ -81,8 +82,26 @@ void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
clk_disable_unprepare(mxsfb->clk_axi);
}
static struct drm_framebuffer *
mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info;
info = drm_get_format_info(dev, mode_cmd);
if (!info)
return ERR_PTR(-EINVAL);
if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n");
return ERR_PTR(-EINVAL);
}
return drm_gem_fb_create(dev, file_priv, mode_cmd);
}
static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = {
.fb_create = mxsfb_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};


@ -615,7 +615,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
@ -1172,7 +1172,7 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -ENOMEM;
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
goto fail_free;
}
@ -1336,10 +1336,11 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, NULL, NULL,
&nv_crtc->cursor.nvbo);
if (!ret) {
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
NOUVEAU_GEM_DOMAIN_VRAM, false);
if (!ret) {
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)


@ -134,7 +134,7 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
if (!fb || !fb->obj[0])
continue;
nvbo = nouveau_gem_object(fb->obj[0]);
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
@ -144,7 +144,8 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
if (!nv_crtc->cursor.nvbo)
continue;
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo,
NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret && nv_crtc->cursor.set_offset)
ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
if (ret)


@ -142,7 +142,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;
@ -387,7 +387,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
return ret;
nvbo = nouveau_gem_object(fb->obj[0]);
ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
if (ret)
return ret;


@ -2069,6 +2069,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_wait_for_fences(dev, state, false);
drm_atomic_helper_wait_for_dependencies(state);
drm_atomic_helper_update_legacy_modeset_state(dev, state);
drm_atomic_helper_calc_timestamping_constants(state);
if (atom->lock_core) if (atom->lock_core)
mutex_lock(&disp->mutex); mutex_lock(&disp->mutex);
@ -2622,10 +2623,11 @@ nv50_display_create(struct drm_device *dev)
dev->mode_config.normalize_zpos = true; dev->mode_config.normalize_zpos = true;
/* small shared memory area we use for notifiers and semaphores */ /* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, ret = nouveau_bo_new(&drm->client, 4096, 0x1000,
NOUVEAU_GEM_DOMAIN_VRAM,
0, 0x0000, NULL, NULL, &disp->sync); 0, 0x0000, NULL, NULL, &disp->sync);
if (!ret) { if (!ret) {
ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true); ret = nouveau_bo_pin(disp->sync, NOUVEAU_GEM_DOMAIN_VRAM, true);
if (!ret) { if (!ret) {
ret = nouveau_bo_map(disp->sync); ret = nouveau_bo_map(disp->sync);
if (ret) if (ret)
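The first hunk above is the driver-side half of the core change in this pull: drm_atomic_helper_calc_timestamping_constants() has been split out of drm_atomic_helper_update_legacy_modeset_state(), so any driver that relied on the legacy helper to refresh vblank timestamping constants now has to call the new helper itself. A sketch of the resulting ordering in a commit tail, grounded directly in the hunk above:

	static void example_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		drm_atomic_helper_wait_for_fences(dev, state, false);
		drm_atomic_helper_wait_for_dependencies(state);
		drm_atomic_helper_update_legacy_modeset_state(dev, state);
		/* no longer implied by the call above */
		drm_atomic_helper_calc_timestamping_constants(state);

		/* ... hardware programming and commit cleanup as before ... */
	}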


@@ -542,7 +542,7 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
 		return 0;
 
 	nvbo = nouveau_gem_object(fb->obj[0]);
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
+	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, true);
 	if (ret)
 		return ret;


@@ -328,7 +328,8 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 	ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
 			      0, 0, &chan->ntfy);
 	if (ret == 0)
-		ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
+		ret = nouveau_bo_pin(chan->ntfy, NOUVEAU_GEM_DOMAIN_GART,
+				     false);
 	if (ret)
 		goto done;


@@ -44,6 +44,9 @@
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>
 
+static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+			       struct ttm_resource *reg);
+
 /*
  * NV10-NV40 tiling helpers
  */
@@ -137,6 +140,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
 	WARN_ON(nvbo->pin_refcnt > 0);
+	nouveau_bo_del_io_reserve_lru(bo);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
 
 	/*
@@ -158,8 +162,7 @@ roundup_64(u64 x, u32 y)
 }
 
 static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
-		       int *align, u64 *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct nvif_device *device = &drm->client.device;
@@ -192,7 +195,7 @@ nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
 }
 
 struct nouveau_bo *
-nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		 u32 tile_mode, u32 tile_flags)
 {
 	struct nouveau_drm *drm = cli->drm;
@@ -218,7 +221,7 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
 	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
 	 * into in nouveau_gem_new().
 	 */
-	if (flags & TTM_PL_FLAG_UNCACHED) {
+	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
 		/* Determine if we can get a cache-coherent map, forcing
 		 * uncached mapping if we can't.
 		 */
@@ -258,9 +261,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
 		 * Skip page sizes that can't support needed domains.
 		 */
 		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
-		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
 			continue;
-		if ((flags & TTM_PL_FLAG_TT) &&
+		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
 		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
 			continue;
 
@@ -287,13 +290,13 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
 	}
 	nvbo->page = vmm->page[pi].shift;
 
-	nouveau_bo_fixup_align(nvbo, flags, align, size);
+	nouveau_bo_fixup_align(nvbo, align, size);
 
 	return nvbo;
 }
 
 int
-nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
 		struct sg_table *sg, struct dma_resv *robj)
 {
 	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
@@ -303,7 +306,8 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
 	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
 
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
-	nouveau_bo_placement_set(nvbo, flags, 0);
+	nouveau_bo_placement_set(nvbo, domain, 0);
+	INIT_LIST_HEAD(&nvbo->io_reserve_lru);
 
 	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
 			  &nvbo->placement, align >> PAGE_SHIFT, false,
@@ -318,19 +322,19 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
 
 int
 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
-	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
+	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
 	       struct sg_table *sg, struct dma_resv *robj,
 	       struct nouveau_bo **pnvbo)
 {
 	struct nouveau_bo *nvbo;
 	int ret;
 
-	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
+	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
 				tile_flags);
 	if (IS_ERR(nvbo))
 		return PTR_ERR(nvbo);
 
-	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
+	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
 	if (ret)
 		return ret;
 
@@ -339,27 +343,49 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
 }
 
 static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
+		   uint32_t domain, uint32_t flags)
 {
 	*n = 0;
 
-	if (type & TTM_PL_FLAG_VRAM)
-		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
-	if (type & TTM_PL_FLAG_TT)
-		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
-	if (type & TTM_PL_FLAG_SYSTEM)
-		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+		struct nvif_mmu *mmu = &drm->client.mmu;
+		const u8 type = mmu->type[drm->ttm.type_vram].type;
+
+		pl[*n].mem_type = TTM_PL_VRAM;
+		pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
+
+		/* Some BARs do not support being ioremapped WC */
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+		    type & NVIF_MEM_UNCACHED)
+			pl[*n].flags &= ~TTM_PL_FLAG_WC;
+
+		(*n)++;
+	}
+
+	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+		pl[*n].mem_type = TTM_PL_TT;
+		pl[*n].flags = flags;
+
+		if (drm->agp.bridge)
+			pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
+
+		(*n)++;
+	}
+
+	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+		pl[*n].mem_type = TTM_PL_SYSTEM;
+		pl[(*n)++].flags = flags;
+	}
 }
 
 static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
 	unsigned i, fpfn, lpfn;
 
 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
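The rewritten set_placement_list() is where GEM domains meet TTM placements: each requested domain contributes one ttm_place with an explicit mem_type, and caching flags are trimmed per backend (VRAM can never be CPU-cached; write-combining is additionally dropped on BARs that cannot be ioremapped WC; AGP GART drops cached mappings). A sketch of what a VRAM|GART request expands to, using the names from the hunk above (so it only makes sense inside nouveau_bo.c):

	struct ttm_place pl[3];
	unsigned int n = 0;

	set_placement_list(drm, pl, &n,
			   NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART,
			   TTM_PL_MASK_CACHING);
	/*
	 * n == 2 afterwards:
	 *   pl[0]: mem_type == TTM_PL_VRAM, TTM_PL_FLAG_CACHED removed,
	 *          TTM_PL_FLAG_WC also removed on uncached BARs (Tesla+);
	 *   pl[1]: mem_type == TTM_PL_TT, TTM_PL_FLAG_CACHED removed when
	 *          an AGP bridge is present.
	 */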
@@ -386,26 +412,28 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 }
 
 void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+			 uint32_t busy)
 {
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_placement *pl = &nvbo->placement;
 	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
 						 TTM_PL_MASK_CACHING) |
 			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
 
 	pl->placement = nvbo->placements;
-	set_placement_list(nvbo->placements, &pl->num_placement,
-			   type, flags);
+	set_placement_list(drm, nvbo->placements, &pl->num_placement,
+			   domain, flags);
 
 	pl->busy_placement = nvbo->busy_placements;
-	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
-			   type | busy, flags);
+	set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
+			   domain | busy, flags);
 
-	set_placement_range(nvbo, type);
+	set_placement_range(nvbo, domain);
 }
 
 int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
@@ -417,7 +445,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 		return ret;
 
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
-	    memtype == TTM_PL_FLAG_VRAM && contig) {
+	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
 		if (!nvbo->contig) {
 			nvbo->contig = true;
 			force = true;
@@ -426,10 +454,22 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 	}
 
 	if (nvbo->pin_refcnt) {
-		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+		bool error = evict;
+
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
+			break;
+		case TTM_PL_TT:
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+		default:
+			break;
+		}
+
+		if (error) {
 			NV_ERROR(drm, "bo %p pinned elsewhere: "
 				      "0x%08x vs 0x%08x\n", bo,
-				 1 << bo->mem.mem_type, memtype);
+				 bo->mem.mem_type, domain);
 			ret = -EBUSY;
 		}
 		nvbo->pin_refcnt++;
@@ -437,14 +477,14 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
 	}
 
 	if (evict) {
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
 		ret = nouveau_bo_validate(nvbo, false, false);
 		if (ret)
 			goto out;
 	}
 
 	nvbo->pin_refcnt++;
-	nouveau_bo_placement_set(nvbo, memtype, 0);
+	nouveau_bo_placement_set(nvbo, domain, 0);
 
 	/* drop pin_refcnt temporarily, so we don't trip the assertion
 	 * in nouveau_bo_move() that makes sure we're not trying to
@@ -490,7 +530,16 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 	if (ref)
 		goto out;
 
-	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+		break;
+	case TTM_PL_TT:
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+		break;
+	default:
+		break;
+	}
 
 	ret = nouveau_bo_validate(nvbo, false, false);
 	if (ret == 0) {
@@ -574,6 +623,26 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 					   PAGE_SIZE, DMA_FROM_DEVICE);
 }
 
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_del_init(&nvbo->io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 		    bool no_wait_gpu)
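The two new helpers are pure bookkeeping: a BO whose BAR (I/O) mapping is in active use sits on drm->ttm.io_reserve_lru (tail == most recently used), under io_reserve_mutex, so that nouveau_ttm_io_mem_reserve() further down can pick an eviction victim when BAR space runs out. A sketch of the intended pairing; the add-side callers live elsewhere in this series, while the removal sites are already visible above in nouveau_bo_del_ttm() and below in nouveau_bo_move_ntfy():

	/* after a BO's I/O mapping has been set up and touched: */
	nouveau_bo_add_io_reserve_lru(bo);	/* move to MRU position */

	/* before the mapping is torn down (delete/move paths): */
	nouveau_bo_del_io_reserve_lru(bo);	/* drop from the LRU */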
@@ -646,6 +715,36 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
 	return nouveau_sgdma_create_ttm(bo, page_flags);
 }
 
+static int
+nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+		    struct ttm_resource *reg)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+#endif
+	if (!reg)
+		return -EINVAL;
+#if IS_ENABLED(CONFIG_AGP)
+	if (drm->agp.bridge)
+		return ttm_agp_bind(ttm, reg);
+#endif
+	return nouveau_sgdma_bind(bdev, ttm, reg);
+}
+
+static void
+nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+
+	if (drm->agp.bridge) {
+		ttm_agp_unbind(ttm);
+		return;
+	}
+#endif
+	nouveau_sgdma_unbind(bdev, ttm);
+}
+
 static void
 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 {
@@ -653,11 +752,11 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 
 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
-					 TTM_PL_FLAG_SYSTEM);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+					 NOUVEAU_GEM_DOMAIN_CPU);
 		break;
 	default:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
 		break;
 	}
 
@@ -725,7 +824,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
 		if (ret == 0) {
 			ret = ttm_bo_move_accel_cleanup(bo,
 							&fence->base,
-							evict,
+							evict, false,
 							new_reg);
 			nouveau_fence_unref(&fence);
 		}
@@ -811,7 +910,8 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	struct ttm_place placement_memtype = {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+		.mem_type = TTM_PL_TT,
+		.flags = TTM_PL_MASK_CACHING
 	};
 	struct ttm_placement placement;
 	struct ttm_resource tmp_reg;
@@ -826,7 +926,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		return ret;
 
-	ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
+	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+	if (ret)
+		goto out;
+
+	ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
 	if (ret)
 		goto out;
 
@@ -848,7 +952,8 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 	struct ttm_place placement_memtype = {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+		.mem_type = TTM_PL_TT,
+		.flags = TTM_PL_MASK_CACHING
 	};
 	struct ttm_placement placement;
 	struct ttm_resource tmp_reg;
@@ -888,6 +993,8 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
 	if (bo->destroy != nouveau_bo_del_ttm)
 		return;
 
+	nouveau_bo_del_io_reserve_lru(bo);
+
 	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
 	    mem->mem.page == nvbo->page) {
 		list_for_each_entry(vma, &nvbo->vma_list, head) {
@@ -969,9 +1076,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 
 	/* Fake bo copy. */
 	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
-		BUG_ON(bo->mem.mm_node != NULL);
-		bo->mem = *new_reg;
-		new_reg->mm_node = NULL;
+		ttm_bo_move_null(bo, new_reg);
 		goto out;
 	}
 
@@ -1018,32 +1123,60 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 					  filp->private_data);
 }
 
+static void
+nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
+			       struct ttm_resource *reg)
+{
+	struct nouveau_mem *mem = nouveau_mem(reg);
+
+	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+		switch (reg->mem_type) {
+		case TTM_PL_TT:
+			if (mem->kind)
+				nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		case TTM_PL_VRAM:
+			nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 static int
 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nouveau_mem *mem = nouveau_mem(reg);
+	int ret;
 
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+retry:
 	switch (reg->mem_type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
-		return 0;
+		ret = 0;
+		goto out;
 	case TTM_PL_TT:
 #if IS_ENABLED(CONFIG_AGP)
 		if (drm->agp.bridge) {
-			reg->bus.offset = reg->start << PAGE_SHIFT;
-			reg->bus.base = drm->agp.base;
+			reg->bus.offset = (reg->start << PAGE_SHIFT) +
+				drm->agp.base;
 			reg->bus.is_iomem = !drm->agp.cma;
 		}
#endif
-		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
+		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
+		    !mem->kind) {
 			/* untiled */
+			ret = 0;
 			break;
+		}
 		fallthrough;	/* tiled memory */
 	case TTM_PL_VRAM:
-		reg->bus.offset = reg->start << PAGE_SHIFT;
-		reg->bus.base = device->func->resource_addr(device, 1);
+		reg->bus.offset = (reg->start << PAGE_SHIFT) +
+			device->func->resource_addr(device, 1);
 		reg->bus.is_iomem = true;
 		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
 			union {
@@ -1052,7 +1185,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 			} args;
 			u64 handle, length;
 			u32 argc = 0;
-			int ret;
 
 			switch (mem->mem.object.oclass) {
 			case NVIF_CLASS_MEM_NV50:
@@ -1078,39 +1210,46 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 						 &handle, &length);
 			if (ret != 1) {
 				if (WARN_ON(ret == 0))
-					return -EINVAL;
-				return ret;
+					ret = -EINVAL;
+				goto out;
 			}
 
-			reg->bus.base = 0;
 			reg->bus.offset = handle;
+			ret = 0;
 		}
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
 	}
 
-	return 0;
+out:
+	if (ret == -ENOSPC) {
+		struct nouveau_bo *nvbo;
+
+		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
+						typeof(*nvbo),
+						io_reserve_lru);
+		if (nvbo) {
+			list_del_init(&nvbo->io_reserve_lru);
+			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+					   bdev->dev_mapping);
+			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+			goto retry;
+		}
+	}
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+	return ret;
 }
 
 static void
 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_mem *mem = nouveau_mem(reg);
 
-	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
-		switch (reg->mem_type) {
-		case TTM_PL_TT:
-			if (mem->kind)
-				nvif_object_unmap_handle(&mem->mem.object);
-			break;
-		case TTM_PL_VRAM:
-			nvif_object_unmap_handle(&mem->mem.object);
-			break;
-		default:
-			break;
-		}
-	}
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	nouveau_ttm_io_mem_free_locked(drm, reg);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
 }
 
 static int
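The io_mem_reserve rework above implements a simple eviction-retry loop: when mapping a tiled/VRAM resource fails with -ENOSPC (BAR window exhausted), the least-recently-used BO's I/O reservation is unmapped and the mapping is retried, all under io_reserve_mutex. A condensed sketch of that control flow; try_map() is a hypothetical stand-in for the real mapping work, everything else mirrors the hunk:

	static int io_reserve_with_eviction(struct nouveau_drm *drm,
					    struct ttm_bo_device *bdev,
					    int (*try_map)(struct ttm_resource *),
					    struct ttm_resource *reg)
	{
		struct nouveau_bo *nvbo;
		int ret;

		mutex_lock(&drm->ttm.io_reserve_mutex);
	retry:
		ret = try_map(reg);
		if (ret == -ENOSPC) {
			/* BAR space exhausted: unmap the least recently
			 * used BO and try again. */
			nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
							typeof(*nvbo),
							io_reserve_lru);
			if (nvbo) {
				list_del_init(&nvbo->io_reserve_lru);
				drm_vma_node_unmap(&nvbo->bo.base.vma_node,
						   bdev->dev_mapping);
				nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
				goto retry;
			}
		}
		mutex_unlock(&drm->ttm.io_reserve_mutex);
		return ret;
	}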
@@ -1131,7 +1270,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 		return 0;
 
 	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-		nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+					 0);
 
 		ret = nouveau_bo_validate(nvbo, false, false);
 		if (ret)
@@ -1155,35 +1295,36 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 		nvbo->busy_placements[i].lpfn = mappable;
 	}
 
-	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
 	return nouveau_bo_validate(nvbo, false, false);
 }
 
 static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;
 
 	if (slave && ttm->sg) {
 		/* make userspace faulting work */
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 ttm_dma->dma_address, ttm->num_pages);
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}
 
-	drm = nouveau_bdev(ttm->bdev);
+	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		return ttm_agp_tt_populate(ttm, ctx);
+		return ttm_pool_populate(ttm, ctx);
 	}
 #endif
 
@@ -1196,7 +1337,8 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 }
 
 static void
-nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+			  struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
@@ -1206,12 +1348,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	if (slave)
 		return;
 
-	drm = nouveau_bdev(ttm->bdev);
+	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		ttm_agp_tt_unpopulate(ttm);
+		ttm_pool_unpopulate(ttm);
 		return;
 	}
 #endif
@@ -1226,6 +1368,22 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
 	ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
 }
 
+static void
+nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
+		       struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+
+	if (drm->agp.bridge) {
+		ttm_agp_unbind(ttm);
+		ttm_tt_destroy_common(bdev, ttm);
+		ttm_agp_destroy(ttm);
+		return;
+	}
+#endif
+	nouveau_sgdma_destroy(bdev, ttm);
+}
+
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
 {
@@ -1241,6 +1399,9 @@ struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
+	.ttm_tt_bind = &nouveau_ttm_tt_bind,
+	.ttm_tt_unbind = &nouveau_ttm_tt_unbind,
+	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = nouveau_bo_evict_flags,
 	.move_notify = nouveau_bo_move_ntfy,


@@ -18,6 +18,7 @@ struct nouveau_bo {
 	bool force_coherent;
 	struct ttm_bo_kmap_obj kmap;
 	struct list_head head;
+	struct list_head io_reserve_lru;
 
 	/* protected by ttm_bo_reserve() */
 	struct drm_file *reserved_by;
@@ -76,10 +77,10 @@ extern struct ttm_bo_driver nouveau_bo_driver;
 
 void nouveau_bo_move_init(struct nouveau_drm *);
 struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
-				    u32 flags, u32 tile_mode, u32 tile_flags);
-int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 flags,
+				    u32 domain, u32 tile_mode, u32 tile_flags);
+int nouveau_bo_init(struct nouveau_bo *, u64 size, int align, u32 domain,
 		    struct sg_table *sg, struct dma_resv *robj);
-int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
+int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 domain,
 		   u32 tile_mode, u32 tile_flags, struct sg_table *sg,
 		   struct dma_resv *robj,
 		   struct nouveau_bo **);
@@ -96,6 +97,8 @@ int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
 			bool no_wait_gpu);
 void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
 void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo);
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo);
 
 /* TODO: submit equivalent to TTM generic API upstream? */
 static inline void __iomem *
@@ -119,13 +122,13 @@ nouveau_bo_unmap_unpin_unref(struct nouveau_bo **pnvbo)
 }
 
 static inline int
-nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 flags,
+nouveau_bo_new_pin_map(struct nouveau_cli *cli, u64 size, int align, u32 domain,
 		       struct nouveau_bo **pnvbo)
 {
-	int ret = nouveau_bo_new(cli, size, align, flags,
+	int ret = nouveau_bo_new(cli, size, align, domain,
 				 0, 0, NULL, NULL, pnvbo);
 	if (ret == 0) {
-		ret = nouveau_bo_pin(*pnvbo, flags, true);
+		ret = nouveau_bo_pin(*pnvbo, domain, true);
 		if (ret == 0) {
 			ret = nouveau_bo_map(*pnvbo);
 			if (ret == 0)
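Callers of the inline helper see the same s/flags/domain/ rename. A short usage sketch, assuming cli is a valid struct nouveau_cli pointer from the surrounding driver context:

	struct nouveau_bo *nvbo = NULL;
	int ret;

	/* allocate + pin + kmap a page-sized scratch buffer in GART */
	ret = nouveau_bo_new_pin_map(cli, PAGE_SIZE, 0,
				     NOUVEAU_GEM_DOMAIN_GART, &nvbo);
	if (ret == 0)
		nouveau_bo_wr32(nvbo, 0, 0xcafebabe);	/* via nvbo->kmap */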


@@ -163,9 +163,9 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 	atomic_set(&chan->killed, 0);
 
 	/* allocate memory for dma push buffer */
-	target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
+	target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT;
 	if (nouveau_vram_pushbuf)
-		target = TTM_PL_FLAG_VRAM;
+		target = NOUVEAU_GEM_DOMAIN_VRAM;
 
 	ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
 			     &chan->push.buffer);


@@ -254,12 +254,12 @@ nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
 	chunk->pagemap.owner = drm->dev;
 
 	ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
-			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
+			     NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, NULL, NULL,
 			     &chunk->bo);
 	if (ret)
 		goto out_release;
 
-	ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+	ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 	if (ret)
 		goto out_bo_free;
 
@@ -346,7 +346,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
 	mutex_lock(&drm->dmem->mutex);
 	list_for_each_entry(chunk, &drm->dmem->chunks, list) {
-		ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
+		ret = nouveau_bo_pin(chunk->bo, NOUVEAU_GEM_DOMAIN_VRAM, false);
 		/* FIXME handle pin failure */
 		WARN_ON(ret);
 	}


@@ -164,6 +164,8 @@ struct nouveau_drm {
 		int type_vram;
 		int type_host[2];
 		int type_ncoh[2];
+		struct mutex io_reserve_mutex;
+		struct list_head io_reserve_lru;
 	} ttm;
 
 	/* GEM interface support */
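These two new ttm members need one-time setup; presumably that happens alongside the rest of the TTM bring-up (nouveau_ttm_init()) in a part of the series not shown here, along the lines of:

	mutex_init(&drm->ttm.io_reserve_mutex);
	INIT_LIST_HEAD(&drm->ttm.io_reserve_lru);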
